Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/APValue.cpp | 108
-rw-r--r--  lib/AST/ASTConsumer.cpp | 19
-rw-r--r--  lib/AST/ASTContext.cpp | 3332
-rw-r--r--  lib/AST/Builtins.cpp | 290
-rw-r--r--  lib/AST/CFG.cpp | 1913
-rw-r--r--  lib/AST/CMakeLists.txt | 32
-rw-r--r--  lib/AST/Decl.cpp | 630
-rw-r--r--  lib/AST/DeclBase.cpp | 756
-rw-r--r--  lib/AST/DeclCXX.cpp | 462
-rw-r--r--  lib/AST/DeclGroup.cpp | 37
-rw-r--r--  lib/AST/DeclObjC.cpp | 693
-rw-r--r--  lib/AST/DeclPrinter.cpp | 722
-rw-r--r--  lib/AST/DeclTemplate.cpp | 324
-rw-r--r--  lib/AST/DeclarationName.cpp | 355
-rw-r--r--  lib/AST/Expr.cpp | 2059
-rw-r--r--  lib/AST/ExprCXX.cpp | 424
-rw-r--r--  lib/AST/ExprConstant.cpp | 1723
-rw-r--r--  lib/AST/InheritViz.cpp | 168
-rw-r--r--  lib/AST/Makefile | 22
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp | 160
-rw-r--r--  lib/AST/ParentMap.cpp | 94
-rw-r--r--  lib/AST/Stmt.cpp | 587
-rw-r--r--  lib/AST/StmtDumper.cpp | 542
-rw-r--r--  lib/AST/StmtIterator.cpp | 155
-rw-r--r--  lib/AST/StmtPrinter.cpp | 1239
-rw-r--r--  lib/AST/StmtViz.cpp | 61
-rw-r--r--  lib/AST/TemplateName.cpp | 65
-rw-r--r--  lib/AST/Type.cpp | 1658
-rw-r--r--  lib/Analysis/BasicConstraintManager.cpp | 342
-rw-r--r--  lib/Analysis/BasicObjCFoundationChecks.cpp | 492
-rw-r--r--  lib/Analysis/BasicObjCFoundationChecks.h | 47
-rw-r--r--  lib/Analysis/BasicStore.cpp | 637
-rw-r--r--  lib/Analysis/BasicValueFactory.cpp | 264
-rw-r--r--  lib/Analysis/BugReporter.cpp | 1697
-rw-r--r--  lib/Analysis/CFRefCount.cpp | 3635
-rw-r--r--  lib/Analysis/CMakeLists.txt | 36
-rw-r--r--  lib/Analysis/CheckDeadStores.cpp | 259
-rw-r--r--  lib/Analysis/CheckNSError.cpp | 231
-rw-r--r--  lib/Analysis/CheckObjCDealloc.cpp | 257
-rw-r--r--  lib/Analysis/CheckObjCInstMethSignature.cpp | 120
-rw-r--r--  lib/Analysis/CheckObjCUnusedIVars.cpp | 111
-rw-r--r--  lib/Analysis/Environment.cpp | 167
-rw-r--r--  lib/Analysis/ExplodedGraph.cpp | 241
-rw-r--r--  lib/Analysis/GRBlockCounter.cpp | 54
-rw-r--r--  lib/Analysis/GRCoreEngine.cpp | 576
-rw-r--r--  lib/Analysis/GRExprEngine.cpp | 3426
-rw-r--r--  lib/Analysis/GRExprEngineInternalChecks.cpp | 961
-rw-r--r--  lib/Analysis/GRSimpleVals.cpp | 416
-rw-r--r--  lib/Analysis/GRSimpleVals.h | 86
-rw-r--r--  lib/Analysis/GRState.cpp | 318
-rw-r--r--  lib/Analysis/GRTransferFuncs.cpp | 28
-rw-r--r--  lib/Analysis/LiveVariables.cpp | 359
-rw-r--r--  lib/Analysis/Makefile | 22
-rw-r--r--  lib/Analysis/MemRegion.cpp | 494
-rw-r--r--  lib/Analysis/PathDiagnostic.cpp | 242
-rw-r--r--  lib/Analysis/RangeConstraintManager.cpp | 363
-rw-r--r--  lib/Analysis/RegionStore.cpp | 1304
-rw-r--r--  lib/Analysis/SVals.cpp | 513
-rw-r--r--  lib/Analysis/SimpleConstraintManager.cpp | 263
-rw-r--r--  lib/Analysis/SimpleConstraintManager.h | 84
-rw-r--r--  lib/Analysis/Store.cpp | 110
-rw-r--r--  lib/Analysis/SymbolManager.cpp | 203
-rw-r--r--  lib/Analysis/UninitializedValues.cpp | 312
-rw-r--r--  lib/Basic/CMakeLists.txt | 24
-rw-r--r--  lib/Basic/ConvertUTF.c | 547
-rw-r--r--  lib/Basic/Diagnostic.cpp | 788
-rw-r--r--  lib/Basic/FileManager.cpp | 302
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 388
-rw-r--r--  lib/Basic/Makefile | 22
-rw-r--r--  lib/Basic/SourceLocation.cpp | 125
-rw-r--r--  lib/Basic/SourceManager.cpp | 943
-rw-r--r--  lib/Basic/TargetInfo.cpp | 295
-rw-r--r--  lib/Basic/Targets.cpp | 1500
-rw-r--r--  lib/Basic/TokenKinds.cpp | 90
-rw-r--r--  lib/CMakeLists.txt | 11
-rw-r--r--  lib/CodeGen/ABIInfo.h | 133
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 1037
-rw-r--r--  lib/CodeGen/CGBlocks.h | 223
-rw-r--r--  lib/CodeGen/CGBuilder.h | 26
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 1037
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 454
-rw-r--r--  lib/CodeGen/CGCXX.h | 36
-rw-r--r--  lib/CodeGen/CGCall.cpp | 2196
-rw-r--r--  lib/CodeGen/CGCall.h | 104
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 987
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 126
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 489
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 1324
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 554
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 663
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 588
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 1575
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 644
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 1582
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 5780
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 206
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 1022
-rw-r--r--  lib/CodeGen/CGValue.h | 323
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 24
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 714
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 900
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 1543
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 467
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 614
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 212
-rw-r--r--  lib/CodeGen/Makefile | 23
-rw-r--r--  lib/CodeGen/Mangle.cpp | 772
-rw-r--r--  lib/CodeGen/Mangle.h | 44
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 100
-rw-r--r--  lib/CodeGen/README.txt | 65
-rw-r--r--  lib/Driver/Action.cpp | 79
-rw-r--r--  lib/Driver/Arg.cpp | 192
-rw-r--r--  lib/Driver/ArgList.cpp | 232
-rw-r--r--  lib/Driver/CMakeLists.txt | 19
-rw-r--r--  lib/Driver/Compilation.cpp | 174
-rw-r--r--  lib/Driver/Driver.cpp | 1254
-rw-r--r--  lib/Driver/HostInfo.cpp | 408
-rw-r--r--  lib/Driver/InputInfo.h | 101
-rw-r--r--  lib/Driver/Job.cpp | 31
-rw-r--r--  lib/Driver/Makefile | 28
-rw-r--r--  lib/Driver/OptTable.cpp | 265
-rw-r--r--  lib/Driver/Option.cpp | 250
-rw-r--r--  lib/Driver/Phases.cpp | 27
-rw-r--r--  lib/Driver/Tool.cpp | 19
-rw-r--r--  lib/Driver/ToolChain.cpp | 35
-rw-r--r--  lib/Driver/ToolChains.cpp | 475
-rw-r--r--  lib/Driver/ToolChains.h | 134
-rw-r--r--  lib/Driver/Tools.cpp | 2033
-rw-r--r--  lib/Driver/Tools.h | 316
-rw-r--r--  lib/Driver/Types.cpp | 205
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 451
-rw-r--r--  lib/Frontend/AnalysisConsumer.cpp | 659
-rw-r--r--  lib/Frontend/Backend.cpp | 415
-rw-r--r--  lib/Frontend/CMakeLists.txt | 35
-rw-r--r--  lib/Frontend/CacheTokens.cpp | 658
-rw-r--r--  lib/Frontend/DependencyFile.cpp | 169
-rw-r--r--  lib/Frontend/DiagChecker.cpp | 302
-rw-r--r--  lib/Frontend/DocumentXML.cpp | 579
-rw-r--r--  lib/Frontend/FixItRewriter.cpp | 199
-rw-r--r--  lib/Frontend/GeneratePCH.cpp | 78
-rw-r--r--  lib/Frontend/HTMLDiagnostics.cpp | 602
-rw-r--r--  lib/Frontend/HTMLPrint.cpp | 92
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 327
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 495
-rw-r--r--  lib/Frontend/Makefile | 18
-rw-r--r--  lib/Frontend/ManagerRegistry.cpp | 20
-rw-r--r--  lib/Frontend/PCHReader.cpp | 2260
-rw-r--r--  lib/Frontend/PCHReaderDecl.cpp | 712
-rw-r--r--  lib/Frontend/PCHReaderStmt.cpp | 1136
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 1966
-rw-r--r--  lib/Frontend/PCHWriterDecl.cpp | 532
-rw-r--r--  lib/Frontend/PCHWriterStmt.cpp | 829
-rw-r--r--  lib/Frontend/PlistDiagnostics.cpp | 389
-rw-r--r--  lib/Frontend/PrintParserCallbacks.cpp | 831
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp | 470
-rw-r--r--  lib/Frontend/RewriteBlocks.cpp | 1162
-rw-r--r--  lib/Frontend/RewriteMacros.cpp | 215
-rw-r--r--  lib/Frontend/RewriteObjC.cpp | 4693
-rw-r--r--  lib/Frontend/RewriteTest.cpp | 39
-rw-r--r--  lib/Frontend/StmtXML.cpp | 409
-rw-r--r--  lib/Frontend/TextDiagnosticBuffer.cpp | 39
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 710
-rw-r--r--  lib/Frontend/Warnings.cpp | 106
-rw-r--r--  lib/Headers/CMakeLists.txt | 25
-rw-r--r--  lib/Headers/Makefile | 40
-rw-r--r--  lib/Headers/emmintrin.h | 1329
-rw-r--r--  lib/Headers/float.h | 71
-rw-r--r--  lib/Headers/iso646.h | 43
-rw-r--r--  lib/Headers/limits.h | 114
-rw-r--r--  lib/Headers/mm_malloc.h | 59
-rw-r--r--  lib/Headers/mmintrin.h | 449
-rw-r--r--  lib/Headers/pmmintrin.h | 121
-rw-r--r--  lib/Headers/stdarg.h | 47
-rw-r--r--  lib/Headers/stdbool.h | 38
-rw-r--r--  lib/Headers/stddef.h | 43
-rw-r--r--  lib/Headers/stdint.h | 232
-rw-r--r--  lib/Headers/tgmath.h | 1358
-rw-r--r--  lib/Headers/tmmintrin.h | 218
-rw-r--r--  lib/Headers/xmmintrin.h | 888
-rw-r--r--  lib/Lex/CMakeLists.txt | 26
-rw-r--r--  lib/Lex/HeaderMap.cpp | 245
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 446
-rw-r--r--  lib/Lex/Lexer.cpp | 1809
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 929
-rw-r--r--  lib/Lex/MacroArgs.cpp | 240
-rw-r--r--  lib/Lex/MacroArgs.h | 109
-rw-r--r--  lib/Lex/MacroInfo.cpp | 75
-rw-r--r--  lib/Lex/Makefile | 28
-rw-r--r--  lib/Lex/PPCaching.cpp | 113
-rw-r--r--  lib/Lex/PPDirectives.cpp | 1665
-rw-r--r--  lib/Lex/PPExpressions.cpp | 717
-rw-r--r--  lib/Lex/PPLexerChange.cpp | 345
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 605
-rw-r--r--  lib/Lex/PTHLexer.cpp | 701
-rw-r--r--  lib/Lex/Pragma.cpp | 699
-rw-r--r--  lib/Lex/Preprocessor.cpp | 478
-rw-r--r--  lib/Lex/PreprocessorLexer.cpp | 45
-rw-r--r--  lib/Lex/ScratchBuffer.cpp | 73
-rw-r--r--  lib/Lex/TokenConcatenation.cpp | 219
-rw-r--r--  lib/Lex/TokenLexer.cpp | 542
-rwxr-xr-x  lib/Makefile | 15
-rw-r--r--  lib/Parse/AttributeList.cpp | 145
-rw-r--r--  lib/Parse/CMakeLists.txt | 21
-rw-r--r--  lib/Parse/DeclSpec.cpp | 395
-rw-r--r--  lib/Parse/ExtensionRAIIObject.h | 40
-rw-r--r--  lib/Parse/Makefile | 22
-rw-r--r--  lib/Parse/MinimalAction.cpp | 225
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 271
-rw-r--r--  lib/Parse/ParseDecl.cpp | 2707
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 1292
-rw-r--r--  lib/Parse/ParseExpr.cpp | 1514
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 1166
-rw-r--r--  lib/Parse/ParseInit.cpp | 308
-rw-r--r--  lib/Parse/ParseObjc.cpp | 1708
-rw-r--r--  lib/Parse/ParsePragma.cpp | 182
-rw-r--r--  lib/Parse/ParsePragma.h | 44
-rw-r--r--  lib/Parse/ParseStmt.cpp | 1435
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 812
-rw-r--r--  lib/Parse/ParseTentative.cpp | 920
-rw-r--r--  lib/Parse/Parser.cpp | 996
-rw-r--r--  lib/Rewrite/CMakeLists.txt | 9
-rw-r--r--  lib/Rewrite/DeltaTree.cpp | 485
-rw-r--r--  lib/Rewrite/HTMLRewrite.cpp | 574
-rw-r--r--  lib/Rewrite/Makefile | 22
-rw-r--r--  lib/Rewrite/RewriteRope.cpp | 807
-rw-r--r--  lib/Rewrite/Rewriter.cpp | 228
-rw-r--r--  lib/Rewrite/TokenRewriter.cpp | 98
-rw-r--r--  lib/Sema/CMakeLists.txt | 33
-rw-r--r--  lib/Sema/CXXFieldCollector.h | 76
-rw-r--r--  lib/Sema/IdentifierResolver.cpp | 293
-rw-r--r--  lib/Sema/IdentifierResolver.h | 214
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 327
-rw-r--r--  lib/Sema/Makefile | 23
-rw-r--r--  lib/Sema/ParseAST.cpp | 85
-rw-r--r--  lib/Sema/Sema.cpp | 333
-rw-r--r--  lib/Sema/Sema.h | 2814
-rw-r--r--  lib/Sema/SemaAccess.cpp | 124
-rw-r--r--  lib/Sema/SemaAttr.cpp | 211
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 312
-rw-r--r--  lib/Sema/SemaChecking.cpp | 1449
-rw-r--r--  lib/Sema/SemaDecl.cpp | 4415
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 1803
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 2823
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 2166
-rw-r--r--  lib/Sema/SemaExpr.cpp | 5395
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 1603
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 860
-rw-r--r--  lib/Sema/SemaInherit.cpp | 344
-rw-r--r--  lib/Sema/SemaInherit.h | 248
-rw-r--r--  lib/Sema/SemaInit.cpp | 1784
-rw-r--r--  lib/Sema/SemaLookup.cpp | 1626
-rw-r--r--  lib/Sema/SemaNamedCast.cpp | 932
-rw-r--r--  lib/Sema/SemaOverload.cpp | 4485
-rw-r--r--  lib/Sema/SemaOverload.h | 263
-rw-r--r--  lib/Sema/SemaStmt.cpp | 1266
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 2651
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 1034
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 767
-rw-r--r--  lib/Sema/SemaTemplateInstantiateExpr.cpp | 1278
-rw-r--r--  lib/Sema/SemaTemplateInstantiateStmt.cpp | 443
-rw-r--r--  lib/Sema/SemaType.cpp | 1301
261 files changed, 172202 insertions, 0 deletions
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
new file mode 100644
index 0000000..4df7671
--- /dev/null
+++ b/lib/AST/APValue.cpp
@@ -0,0 +1,108 @@
+//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+
+const APValue &APValue::operator=(const APValue &RHS) {
+ if (Kind != RHS.Kind) {
+ MakeUninit();
+ if (RHS.isInt())
+ MakeInt();
+ else if (RHS.isFloat())
+ MakeFloat();
+ else if (RHS.isVector())
+ MakeVector();
+ else if (RHS.isComplexInt())
+ MakeComplexInt();
+ else if (RHS.isComplexFloat())
+ MakeComplexFloat();
+ else if (RHS.isLValue())
+ MakeLValue();
+ }
+ if (isInt())
+ setInt(RHS.getInt());
+ else if (isFloat())
+ setFloat(RHS.getFloat());
+ else if (isVector())
+ setVector(((Vec*)(void*)RHS.Data)->Elts, RHS.getVectorLength());
+ else if (isComplexInt())
+ setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
+ else if (isComplexFloat())
+ setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
+ else if (isLValue())
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset());
+ return *this;
+}
+
+void APValue::MakeUninit() {
+ if (Kind == Int)
+ ((APSInt*)(void*)Data)->~APSInt();
+ else if (Kind == Float)
+ ((APFloat*)(void*)Data)->~APFloat();
+ else if (Kind == Vector)
+ ((Vec*)(void*)Data)->~Vec();
+ else if (Kind == ComplexInt)
+ ((ComplexAPSInt*)(void*)Data)->~ComplexAPSInt();
+ else if (Kind == ComplexFloat)
+ ((ComplexAPFloat*)(void*)Data)->~ComplexAPFloat();
+ else if (Kind == LValue) {
+ ((LV*)(void*)Data)->~LV();
+ }
+ Kind = Uninitialized;
+}
+
+void APValue::dump() const {
+ print(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+void APValue::print(llvm::raw_ostream &OS) const {
+ switch (getKind()) {
+ default: assert(0 && "Unknown APValue kind!");
+ case Uninitialized:
+ OS << "Uninitialized";
+ return;
+ case Int:
+ OS << "Int: " << getInt();
+ return;
+ case Float:
+ OS << "Float: " << GetApproxValue(getFloat());
+ return;
+ case Vector:
+ OS << "Vector: " << getVectorElt(0);
+ for (unsigned i = 1; i != getVectorLength(); ++i)
+ OS << ", " << getVectorElt(i);
+ return;
+ case ComplexInt:
+ OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
+ return;
+ case ComplexFloat:
+ OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
+ << ", " << GetApproxValue(getComplexFloatImag());
+ return;
+ case LValue:
+ OS << "LValue: <todo>";
+ return;
+ }
+}
+
diff --git a/lib/AST/ASTConsumer.cpp b/lib/AST/ASTConsumer.cpp
new file mode 100644
index 0000000..f37cbde
--- /dev/null
+++ b/lib/AST/ASTConsumer.cpp
@@ -0,0 +1,19 @@
+//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTConsumer class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclGroup.h"
+using namespace clang;
+
+void ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {}
+
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
new file mode 100644
index 0000000..29bca29
--- /dev/null
+++ b/lib/AST/ASTContext.cpp
@@ -0,0 +1,3332 @@
+//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTContext interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+using namespace clang;
+
+enum FloatingRank {
+ FloatRank, DoubleRank, LongDoubleRank
+};
+
+ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
+ TargetInfo &t,
+ IdentifierTable &idents, SelectorTable &sels,
+ bool FreeMem, unsigned size_reserve,
+ bool InitializeBuiltins) :
+ GlobalNestedNameSpecifier(0), CFConstantStringTypeDecl(0),
+ ObjCFastEnumerationStateTypeDecl(0), SourceMgr(SM), LangOpts(LOpts),
+ FreeMemory(FreeMem), Target(t), Idents(idents), Selectors(sels),
+ ExternalSource(0) {
+ if (size_reserve > 0) Types.reserve(size_reserve);
+ InitBuiltinTypes();
+ TUDecl = TranslationUnitDecl::Create(*this);
+ BuiltinInfo.InitializeTargetBuiltins(Target);
+ if (InitializeBuiltins)
+ this->InitializeBuiltins(idents);
+ PrintingPolicy.CPlusPlus = LangOpts.CPlusPlus;
+}
+
+ASTContext::~ASTContext() {
+ // Deallocate all the types.
+ while (!Types.empty()) {
+ Types.back()->Destroy(*this);
+ Types.pop_back();
+ }
+
+ {
+ llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end();
+ while (I != E) {
+ ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second);
+ delete R;
+ }
+ }
+
+ {
+ llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>::iterator
+ I = ObjCLayouts.begin(), E = ObjCLayouts.end();
+ while (I != E) {
+ ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second);
+ delete R;
+ }
+ }
+
+ // Destroy nested-name-specifiers.
+ for (llvm::FoldingSet<NestedNameSpecifier>::iterator
+ NNS = NestedNameSpecifiers.begin(),
+ NNSEnd = NestedNameSpecifiers.end();
+ NNS != NNSEnd;
+ /* Increment in loop */)
+ (*NNS++).Destroy(*this);
+
+ if (GlobalNestedNameSpecifier)
+ GlobalNestedNameSpecifier->Destroy(*this);
+
+ TUDecl->Destroy(*this);
+}
+
+void ASTContext::InitializeBuiltins(IdentifierTable &idents) {
+ BuiltinInfo.InitializeBuiltins(idents, LangOpts.NoBuiltin);
+}
+
+void
+ASTContext::setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source) {
+ ExternalSource.reset(Source.take());
+}
+
+void ASTContext::PrintStats() const {
+ fprintf(stderr, "*** AST Context Stats:\n");
+ fprintf(stderr, " %d types total.\n", (int)Types.size());
+
+ unsigned counts[] = {
+#define TYPE(Name, Parent) 0,
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+ 0 // Extra
+ };
+
+ for (unsigned i = 0, e = Types.size(); i != e; ++i) {
+ Type *T = Types[i];
+ counts[(unsigned)T->getTypeClass()]++;
+ }
+
+ unsigned Idx = 0;
+ unsigned TotalBytes = 0;
+#define TYPE(Name, Parent) \
+ if (counts[Idx]) \
+ fprintf(stderr, " %d %s types\n", (int)counts[Idx], #Name); \
+ TotalBytes += counts[Idx] * sizeof(Name##Type); \
+ ++Idx;
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+
+ fprintf(stderr, "Total bytes = %d\n", int(TotalBytes));
+
+ if (ExternalSource.get()) {
+ fprintf(stderr, "\n");
+ ExternalSource->PrintStats();
+ }
+}
+
+
+void ASTContext::InitBuiltinType(QualType &R, BuiltinType::Kind K) {
+ Types.push_back((R = QualType(new (*this,8) BuiltinType(K),0)).getTypePtr());
+}
+
+void ASTContext::InitBuiltinTypes() {
+ assert(VoidTy.isNull() && "Context reinitialized?");
+
+ // C99 6.2.5p19.
+ InitBuiltinType(VoidTy, BuiltinType::Void);
+
+ // C99 6.2.5p2.
+ InitBuiltinType(BoolTy, BuiltinType::Bool);
+ // C99 6.2.5p3.
+ if (Target.isCharSigned())
+ InitBuiltinType(CharTy, BuiltinType::Char_S);
+ else
+ InitBuiltinType(CharTy, BuiltinType::Char_U);
+ // C99 6.2.5p4.
+ InitBuiltinType(SignedCharTy, BuiltinType::SChar);
+ InitBuiltinType(ShortTy, BuiltinType::Short);
+ InitBuiltinType(IntTy, BuiltinType::Int);
+ InitBuiltinType(LongTy, BuiltinType::Long);
+ InitBuiltinType(LongLongTy, BuiltinType::LongLong);
+
+ // C99 6.2.5p6.
+ InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
+ InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
+ InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
+ InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
+ InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
+
+ // C99 6.2.5p10.
+ InitBuiltinType(FloatTy, BuiltinType::Float);
+ InitBuiltinType(DoubleTy, BuiltinType::Double);
+ InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
+
+ // GNU extension, 128-bit integers.
+ InitBuiltinType(Int128Ty, BuiltinType::Int128);
+ InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
+
+ if (LangOpts.CPlusPlus) // C++ 3.9.1p5
+ InitBuiltinType(WCharTy, BuiltinType::WChar);
+ else // C99
+ WCharTy = getFromTargetType(Target.getWCharType());
+
+ // Placeholder type for functions.
+ InitBuiltinType(OverloadTy, BuiltinType::Overload);
+
+ // Placeholder type for type-dependent expressions whose type is
+ // completely unknown. No code should ever check a type against
+ // DependentTy and users should never see it; however, it is here to
+ // help diagnose failures to properly check for type-dependent
+ // expressions.
+ InitBuiltinType(DependentTy, BuiltinType::Dependent);
+
+ // C99 6.2.5p11.
+ FloatComplexTy = getComplexType(FloatTy);
+ DoubleComplexTy = getComplexType(DoubleTy);
+ LongDoubleComplexTy = getComplexType(LongDoubleTy);
+
+ BuiltinVaListType = QualType();
+ ObjCIdType = QualType();
+ IdStructType = 0;
+ ObjCClassType = QualType();
+ ClassStructType = 0;
+
+ ObjCConstantStringType = QualType();
+
+ // void * type
+ VoidPtrTy = getPointerType(VoidTy);
+
+ // nullptr type (C++0x 2.14.7)
+ InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
+}
+
+//===----------------------------------------------------------------------===//
+// Type Sizing and Analysis
+//===----------------------------------------------------------------------===//
+
+/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+/// scalar floating point type.
+const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
+ const BuiltinType *BT = T->getAsBuiltinType();
+ assert(BT && "Not a floating point type!");
+ switch (BT->getKind()) {
+ default: assert(0 && "Not a floating point type!");
+ case BuiltinType::Float: return Target.getFloatFormat();
+ case BuiltinType::Double: return Target.getDoubleFormat();
+ case BuiltinType::LongDouble: return Target.getLongDoubleFormat();
+ }
+}
+
+/// getDeclAlignInBytes - Return a conservative estimate of the alignment of the
+/// specified decl. Note that bitfields do not have a valid alignment, so
+/// this method will assert on them.
+unsigned ASTContext::getDeclAlignInBytes(const Decl *D) {
+ unsigned Align = Target.getCharWidth();
+
+ if (const AlignedAttr* AA = D->getAttr<AlignedAttr>())
+ Align = std::max(Align, AA->getAlignment());
+
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ QualType T = VD->getType();
+ if (const ReferenceType* RT = T->getAsReferenceType()) {
+ unsigned AS = RT->getPointeeType().getAddressSpace();
+ Align = Target.getPointerAlign(AS);
+ } else if (!T->isIncompleteType() && !T->isFunctionType()) {
+ // Incomplete or function types default to 1.
+ while (isa<VariableArrayType>(T) || isa<IncompleteArrayType>(T))
+ T = cast<ArrayType>(T)->getElementType();
+
+ Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+ }
+ }
+
+ return Align / Target.getCharWidth();
+}
+
+/// getTypeInfo - Return the size (in bits) and alignment (in bits) of the
+/// specified type. This method does not work on incomplete types.
+std::pair<uint64_t, unsigned>
+ASTContext::getTypeInfo(const Type *T) {
+ uint64_t Width=0;
+ unsigned Align=8;
+ switch (T->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Should not see dependent types");
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // GCC extension: alignof(function) = 32 bits
+ Width = 0;
+ Align = 32;
+ break;
+
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ Width = 0;
+ Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
+ break;
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+ std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
+ Width = EltInfo.first*CAT->getSize().getZExtValue();
+ Align = EltInfo.second;
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ std::pair<uint64_t, unsigned> EltInfo =
+ getTypeInfo(cast<VectorType>(T)->getElementType());
+ Width = EltInfo.first*cast<VectorType>(T)->getNumElements();
+ Align = Width;
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
+ // FIXME: this should probably be a target property.
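+ // e.g. a vector of three 32-bit floats (96 bits wide) is given 128-bit alignment.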
+ Align = 1 << llvm::Log2_32_Ceil(Align);
+ break;
+ }
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: assert(0 && "Unknown builtin type!");
+ case BuiltinType::Void:
+ // GCC extension: alignof(void) = 8 bits.
+ Width = 0;
+ Align = 8;
+ break;
+
+ case BuiltinType::Bool:
+ Width = Target.getBoolWidth();
+ Align = Target.getBoolAlign();
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ Width = Target.getCharWidth();
+ Align = Target.getCharAlign();
+ break;
+ case BuiltinType::WChar:
+ Width = Target.getWCharWidth();
+ Align = Target.getWCharAlign();
+ break;
+ case BuiltinType::UShort:
+ case BuiltinType::Short:
+ Width = Target.getShortWidth();
+ Align = Target.getShortAlign();
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ Width = Target.getIntWidth();
+ Align = Target.getIntAlign();
+ break;
+ case BuiltinType::ULong:
+ case BuiltinType::Long:
+ Width = Target.getLongWidth();
+ Align = Target.getLongAlign();
+ break;
+ case BuiltinType::ULongLong:
+ case BuiltinType::LongLong:
+ Width = Target.getLongLongWidth();
+ Align = Target.getLongLongAlign();
+ break;
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ Width = 128;
+ Align = 128; // int128_t is 128-bit aligned on all targets.
+ break;
+ case BuiltinType::Float:
+ Width = Target.getFloatWidth();
+ Align = Target.getFloatAlign();
+ break;
+ case BuiltinType::Double:
+ Width = Target.getDoubleWidth();
+ Align = Target.getDoubleAlign();
+ break;
+ case BuiltinType::LongDouble:
+ Width = Target.getLongDoubleWidth();
+ Align = Target.getLongDoubleAlign();
+ break;
+ case BuiltinType::NullPtr:
+ Width = Target.getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
+ Align = Target.getPointerAlign(0); // == sizeof(void*)
+ break;
+ }
+ break;
+ case Type::FixedWidthInt:
+ // FIXME: This isn't precisely correct; the width/alignment should depend
+ // on the available types for the target
+ Width = cast<FixedWidthIntType>(T)->getWidth();
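+ // NextPowerOf2(Width - 1) rounds Width up to the nearest power of two
+ // (e.g. 17 -> 32, while 32 stays 32); the max() enforces a minimum of 8 bits.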
+ Width = std::max(llvm::NextPowerOf2(Width - 1), (uint64_t)8);
+ Align = Width;
+ break;
+ case Type::ExtQual:
+ // FIXME: Pointers into different addr spaces could have different sizes and
+ // alignment requirements: getPointerInfo should take an AddrSpace.
+ return getTypeInfo(QualType(cast<ExtQualType>(T)->getBaseType(), 0));
+ case Type::ObjCQualifiedId:
+ case Type::ObjCQualifiedInterface:
+ Width = Target.getPointerWidth(0);
+ Align = Target.getPointerAlign(0);
+ break;
+ case Type::BlockPointer: {
+ unsigned AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
+ Width = Target.getPointerWidth(AS);
+ Align = Target.getPointerAlign(AS);
+ break;
+ }
+ case Type::Pointer: {
+ unsigned AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
+ Width = Target.getPointerWidth(AS);
+ Align = Target.getPointerAlign(AS);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference:
+ // "When applied to a reference or a reference type, the result is the size
+ // of the referenced type." C++98 5.3.3p2: expr.sizeof.
+ // FIXME: This is wrong for struct layout: a reference in a struct has
+ // pointer size.
+ return getTypeInfo(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
+ // If we ever want to support other ABIs this needs to be abstracted.
+
+ QualType Pointee = cast<MemberPointerType>(T)->getPointeeType();
+ std::pair<uint64_t, unsigned> PtrDiffInfo =
+ getTypeInfo(getPointerDiffType());
+ Width = PtrDiffInfo.first;
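+ // In the Itanium ABI a pointer to member function is a {pointer, adjustment}
+ // pair and so occupies two ptrdiff_t's; a pointer to data member is a single
+ // ptrdiff_t offset.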
+ if (Pointee->isFunctionType())
+ Width *= 2;
+ Align = PtrDiffInfo.second;
+ break;
+ }
+ case Type::Complex: {
+ // Complex types have the same alignment as their elements, but twice the
+ // size.
+ std::pair<uint64_t, unsigned> EltInfo =
+ getTypeInfo(cast<ComplexType>(T)->getElementType());
+ Width = EltInfo.first*2;
+ Align = EltInfo.second;
+ break;
+ }
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+ const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+ Width = Layout.getSize();
+ Align = Layout.getAlignment();
+ break;
+ }
+ case Type::Record:
+ case Type::Enum: {
+ const TagType *TT = cast<TagType>(T);
+
+ if (TT->getDecl()->isInvalidDecl()) {
+ Width = 1;
+ Align = 1;
+ break;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(TT))
+ return getTypeInfo(ET->getDecl()->getIntegerType());
+
+ const RecordType *RT = cast<RecordType>(TT);
+ const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
+ Width = Layout.getSize();
+ Align = Layout.getAlignment();
+ break;
+ }
+
+ case Type::Typedef: {
+ const TypedefDecl *Typedef = cast<TypedefType>(T)->getDecl();
+ if (const AlignedAttr *Aligned = Typedef->getAttr<AlignedAttr>()) {
+ Align = Aligned->getAlignment();
+ Width = getTypeSize(Typedef->getUnderlyingType().getTypePtr());
+ } else
+ return getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ break;
+ }
+
+ case Type::TypeOfExpr:
+ return getTypeInfo(cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType()
+ .getTypePtr());
+
+ case Type::TypeOf:
+ return getTypeInfo(cast<TypeOfType>(T)->getUnderlyingType().getTypePtr());
+
+ case Type::QualifiedName:
+ return getTypeInfo(cast<QualifiedNameType>(T)->getNamedType().getTypePtr());
+
+ case Type::TemplateSpecialization:
+ assert(getCanonicalType(T) != T &&
+ "Cannot request the size of a dependent type");
+ // FIXME: this is likely to be wrong once we support template
+ // aliases, since a template alias could refer to a typedef that
+ // has an __aligned__ attribute on it.
+ return getTypeInfo(getCanonicalType(T));
+ }
+
+ assert(Align && (Align & (Align-1)) == 0 && "Alignment must be power of 2");
+ return std::make_pair(Width, Align);
+}
+
+/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+/// type for the current target in bits. This can be different than the ABI
+/// alignment in cases where it is beneficial for performance to overalign
+/// a data type.
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) {
+ unsigned ABIAlign = getTypeAlign(T);
+
+ // Double and long long should be naturally aligned if possible.
+ if (const ComplexType* CT = T->getAsComplexType())
+ T = CT->getElementType().getTypePtr();
+ if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+ T->isSpecificBuiltinType(BuiltinType::LongLong))
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+ return ABIAlign;
+}
+
+
+/// LayoutField - Field layout.
+void ASTRecordLayout::LayoutField(const FieldDecl *FD, unsigned FieldNo,
+ bool IsUnion, unsigned StructPacking,
+ ASTContext &Context) {
+ unsigned FieldPacking = StructPacking;
+ uint64_t FieldOffset = IsUnion ? 0 : Size;
+ uint64_t FieldSize;
+ unsigned FieldAlign;
+
+ // FIXME: Should this override struct packing? Probably we want to
+ // take the minimum?
+ if (const PackedAttr *PA = FD->getAttr<PackedAttr>())
+ FieldPacking = PA->getAlignment();
+
+ if (const Expr *BitWidthExpr = FD->getBitWidth()) {
+ // TODO: Need to check this algorithm on other targets!
+ // (tested on Linux-X86)
+ FieldSize = BitWidthExpr->EvaluateAsInt(Context).getZExtValue();
+
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ uint64_t TypeSize = FieldInfo.first;
+
+ // Determine the alignment of this bitfield. The packing
+ // attributes define a maximum and the alignment attribute defines
+ // a minimum.
+ // FIXME: What is the right behavior when the specified alignment
+ // is smaller than the specified packing?
+ FieldAlign = FieldInfo.second;
+ if (FieldPacking)
+ FieldAlign = std::min(FieldAlign, FieldPacking);
+ if (const AlignedAttr *AA = FD->getAttr<AlignedAttr>())
+ FieldAlign = std::max(FieldAlign, AA->getAlignment());
+
+ // Check if we need to add padding to give the field the correct
+ // alignment.
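+ // A zero-width bit-field, or a bit-field that would otherwise straddle the
+ // storage unit of its declared type, starts at the next FieldAlign boundary.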
+ if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)
+ FieldOffset = (FieldOffset + (FieldAlign-1)) & ~(FieldAlign-1);
+
+ // Padding members don't affect overall alignment
+ if (!FD->getIdentifier())
+ FieldAlign = 1;
+ } else {
+ if (FD->getType()->isIncompleteArrayType()) {
+ // This is a flexible array member; we can't directly
+ // query getTypeInfo about these, so we figure it out here.
+ // Flexible array members don't have any size, but they
+ // have to be aligned appropriately for their element type.
+ FieldSize = 0;
+ const ArrayType* ATy = Context.getAsArrayType(FD->getType());
+ FieldAlign = Context.getTypeAlign(ATy->getElementType());
+ } else if (const ReferenceType *RT = FD->getType()->getAsReferenceType()) {
+ unsigned AS = RT->getPointeeType().getAddressSpace();
+ FieldSize = Context.Target.getPointerWidth(AS);
+ FieldAlign = Context.Target.getPointerAlign(AS);
+ } else {
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ FieldSize = FieldInfo.first;
+ FieldAlign = FieldInfo.second;
+ }
+
+ // Determine the alignment of this bitfield. The packing
+ // attributes define a maximum and the alignment attribute defines
+ // a minimum. Additionally, the packing alignment must be at least
+ // a byte for non-bitfields.
+ //
+ // FIXME: What is the right behavior when the specified alignment
+ // is smaller than the specified packing?
+ if (FieldPacking)
+ FieldAlign = std::min(FieldAlign, std::max(8U, FieldPacking));
+ if (const AlignedAttr *AA = FD->getAttr<AlignedAttr>())
+ FieldAlign = std::max(FieldAlign, AA->getAlignment());
+
+ // Round up the current record size to the field's alignment boundary.
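+ // e.g. with FieldAlign == 32, a FieldOffset of 33 bits is rounded up to 64.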
+ FieldOffset = (FieldOffset + (FieldAlign-1)) & ~(FieldAlign-1);
+ }
+
+ // Place this field at the current location.
+ FieldOffsets[FieldNo] = FieldOffset;
+
+ // Reserve space for this field.
+ if (IsUnion) {
+ Size = std::max(Size, FieldSize);
+ } else {
+ Size = FieldOffset + FieldSize;
+ }
+
+ // Remember the next available offset.
+ NextOffset = Size;
+
+ // Remember max struct/class alignment.
+ Alignment = std::max(Alignment, FieldAlign);
+}
+
+static void CollectLocalObjCIvars(ASTContext *Ctx,
+ const ObjCInterfaceDecl *OI,
+ llvm::SmallVectorImpl<FieldDecl*> &Fields) {
+ for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
+ E = OI->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl *IVDecl = *I;
+ if (!IVDecl->isInvalidDecl())
+ Fields.push_back(cast<FieldDecl>(IVDecl));
+ }
+}
+
+void ASTContext::CollectObjCIvars(const ObjCInterfaceDecl *OI,
+ llvm::SmallVectorImpl<FieldDecl*> &Fields) {
+ if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
+ CollectObjCIvars(SuperClass, Fields);
+ CollectLocalObjCIvars(this, OI, Fields);
+}
+
+void ASTContext::CollectProtocolSynthesizedIvars(const ObjCProtocolDecl *PD,
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ for (ObjCContainerDecl::prop_iterator I = PD->prop_begin(*this),
+ E = PD->prop_end(*this); I != E; ++I)
+ if (ObjCIvarDecl *Ivar = (*I)->getPropertyIvarDecl())
+ Ivars.push_back(Ivar);
+
+ // Also look into nested protocols.
+ for (ObjCProtocolDecl::protocol_iterator P = PD->protocol_begin(),
+ E = PD->protocol_end(); P != E; ++P)
+ CollectProtocolSynthesizedIvars(*P, Ivars);
+}
+
+/// CollectSynthesizedIvars -
+/// This routine collects synthesized ivars for the designated class.
+///
+void ASTContext::CollectSynthesizedIvars(const ObjCInterfaceDecl *OI,
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ for (ObjCInterfaceDecl::prop_iterator I = OI->prop_begin(*this),
+ E = OI->prop_end(*this); I != E; ++I) {
+ if (ObjCIvarDecl *Ivar = (*I)->getPropertyIvarDecl())
+ Ivars.push_back(Ivar);
+ }
+ // Also look into interface's protocol list for properties declared
+ // in the protocol and whose ivars are synthesized.
+ for (ObjCInterfaceDecl::protocol_iterator P = OI->protocol_begin(),
+ PE = OI->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *PD = (*P);
+ CollectProtocolSynthesizedIvars(PD, Ivars);
+ }
+}
+
+/// getObjCLayout - Get or compute information about the
+/// layout of the given interface.
+///
+/// \param Impl - If given, also include the layout of the interface's
+/// implementation. This may differ by including synthesized ivars.
+const ASTRecordLayout &
+ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) {
+ assert(!D->isForwardDecl() && "Invalid interface decl!");
+
+ // Look up this layout, if already laid out, return what we have.
+ ObjCContainerDecl *Key =
+ Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
+ if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
+ return *Entry;
+
+ unsigned FieldCount = D->ivar_size();
+ // Add in synthesized ivar count if laying out an implementation.
+ if (Impl) {
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CollectSynthesizedIvars(D, Ivars);
+ FieldCount += Ivars.size();
+ // If there aren't any synthesized ivars then reuse the interface
+ // entry. Note we can't cache this because we simply free all
+ // entries later; however we shouldn't look up implementations
+ // frequently.
+ if (FieldCount == D->ivar_size())
+ return getObjCLayout(D, 0);
+ }
+
+ ASTRecordLayout *NewEntry = NULL;
+ if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
+ const ASTRecordLayout &SL = getASTObjCInterfaceLayout(SD);
+ unsigned Alignment = SL.getAlignment();
+
+ // We start laying out ivars not at the end of the superclass
+ // structure, but at the next byte following the last field.
+ uint64_t Size = llvm::RoundUpToAlignment(SL.NextOffset, 8);
+
+ ObjCLayouts[Key] = NewEntry = new ASTRecordLayout(Size, Alignment);
+ NewEntry->InitializeLayout(FieldCount);
+ } else {
+ ObjCLayouts[Key] = NewEntry = new ASTRecordLayout();
+ NewEntry->InitializeLayout(FieldCount);
+ }
+
+ unsigned StructPacking = 0;
+ if (const PackedAttr *PA = D->getAttr<PackedAttr>())
+ StructPacking = PA->getAlignment();
+
+ if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
+ NewEntry->SetAlignment(std::max(NewEntry->getAlignment(),
+ AA->getAlignment()));
+
+ // Layout each ivar sequentially.
+ unsigned i = 0;
+ for (ObjCInterfaceDecl::ivar_iterator IVI = D->ivar_begin(),
+ IVE = D->ivar_end(); IVI != IVE; ++IVI) {
+ const ObjCIvarDecl* Ivar = (*IVI);
+ NewEntry->LayoutField(Ivar, i++, false, StructPacking, *this);
+ }
+ // And synthesized ivars, if this is an implementation.
+ if (Impl) {
+ // FIXME: Do we need to collect twice?
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CollectSynthesizedIvars(D, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ NewEntry->LayoutField(Ivars[k], i++, false, StructPacking, *this);
+ }
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ NewEntry->FinalizeLayout();
+ return *NewEntry;
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) {
+ return getObjCLayout(D, 0);
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCImplementationLayout(const ObjCImplementationDecl *D) {
+ return getObjCLayout(D->getClassInterface(), D);
+}
+
+/// getASTRecordLayout - Get or compute information about the layout of the
+/// specified record (struct/union/class), which indicates its size and field
+/// position information.
+const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
+ D = D->getDefinition(*this);
+ assert(D && "Cannot get layout of forward declarations!");
+
+ // Look up this layout, if already laid out, return what we have.
+ const ASTRecordLayout *&Entry = ASTRecordLayouts[D];
+ if (Entry) return *Entry;
+
+ // Allocate and assign into ASTRecordLayouts here. The "Entry" reference can
+ // be invalidated (dangle) if the ASTRecordLayouts hashtable is inserted into.
+ ASTRecordLayout *NewEntry = new ASTRecordLayout();
+ Entry = NewEntry;
+
+ // FIXME: Avoid linear walk through the fields, if possible.
+ NewEntry->InitializeLayout(std::distance(D->field_begin(*this),
+ D->field_end(*this)));
+ bool IsUnion = D->isUnion();
+
+ unsigned StructPacking = 0;
+ if (const PackedAttr *PA = D->getAttr<PackedAttr>())
+ StructPacking = PA->getAlignment();
+
+ if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
+ NewEntry->SetAlignment(std::max(NewEntry->getAlignment(),
+ AA->getAlignment()));
+
+ // Layout each field, for now, just sequentially, respecting alignment. In
+ // the future, this will need to be tweakable by targets.
+ unsigned FieldIdx = 0;
+ for (RecordDecl::field_iterator Field = D->field_begin(*this),
+ FieldEnd = D->field_end(*this);
+ Field != FieldEnd; (void)++Field, ++FieldIdx)
+ NewEntry->LayoutField(*Field, FieldIdx, IsUnion, StructPacking, *this);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ NewEntry->FinalizeLayout(getLangOptions().CPlusPlus);
+ return *NewEntry;
+}
+
+//===----------------------------------------------------------------------===//
+// Type creation/memoization methods
+//===----------------------------------------------------------------------===//
+
+QualType ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getAddressSpace() == AddressSpace)
+ return T;
+
+ // If we are composing extended qualifiers together, merge together into one
+ // ExtQualType node.
+ unsigned CVRQuals = T.getCVRQualifiers();
+ QualType::GCAttrTypes GCAttr = QualType::GCNone;
+ Type *TypeNode = T.getTypePtr();
+
+ if (ExtQualType *EQT = dyn_cast<ExtQualType>(TypeNode)) {
+ // If this type already has an address space specified, it cannot get
+ // another one.
+ assert(EQT->getAddressSpace() == 0 &&
+ "Type cannot be in multiple addr spaces!");
+ GCAttr = EQT->getObjCGCAttr();
+ TypeNode = EQT->getBaseType();
+ }
+
+ // Check if we've already instantiated this type.
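+ // (FoldingSet idiom: Profile() hashes the type's components into ID;
+ // FindNodeOrInsertPos() returns an existing node with that profile, or records
+ // in InsertPos where a new node should be inserted.)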
+ llvm::FoldingSetNodeID ID;
+ ExtQualType::Profile(ID, TypeNode, AddressSpace, GCAttr);
+ void *InsertPos = 0;
+ if (ExtQualType *EXTQy = ExtQualTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(EXTQy, CVRQuals);
+
+ // If the base type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!TypeNode->isCanonical()) {
+ Canonical = getAddrSpaceQualType(CanT, AddressSpace);
+
+ // Update InsertPos, the previous call could have invalidated it.
+ ExtQualType *NewIP = ExtQualTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ ExtQualType *New =
+ new (*this, 8) ExtQualType(TypeNode, Canonical, AddressSpace, GCAttr);
+ ExtQualTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, CVRQuals);
+}
+
+QualType ASTContext::getObjCGCQualType(QualType T,
+ QualType::GCAttrTypes GCAttr) {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getObjCGCAttr() == GCAttr)
+ return T;
+
+ // If we are composing extended qualifiers together, merge together into one
+ // ExtQualType node.
+ unsigned CVRQuals = T.getCVRQualifiers();
+ Type *TypeNode = T.getTypePtr();
+ unsigned AddressSpace = 0;
+
+ if (ExtQualType *EQT = dyn_cast<ExtQualType>(TypeNode)) {
+ // If this type already has an ObjC GC attribute specified, it cannot get
+ // another one.
+ assert(EQT->getObjCGCAttr() == QualType::GCNone &&
+ "Type cannot have multiple ObjC GC attributes!");
+ AddressSpace = EQT->getAddressSpace();
+ TypeNode = EQT->getBaseType();
+ }
+
+ // Check if we've already instantiated a GC-qualified version of this type.
+ llvm::FoldingSetNodeID ID;
+ ExtQualType::Profile(ID, TypeNode, AddressSpace, GCAttr);
+ void *InsertPos = 0;
+ if (ExtQualType *EXTQy = ExtQualTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(EXTQy, CVRQuals);
+
+ // If the base type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ // FIXME: Isn't this also not canonical if the base type is an array
+ // or pointer type? I can't find any documentation for objc_gc, though...
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getObjCGCQualType(CanT, GCAttr);
+
+ // Update InsertPos, the previous call could have invalidated it.
+ ExtQualType *NewIP = ExtQualTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ ExtQualType *New =
+ new (*this, 8) ExtQualType(TypeNode, Canonical, AddressSpace, GCAttr);
+ ExtQualTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, CVRQuals);
+}
+
+/// getComplexType - Return the uniqued reference to the type for a complex
+/// number with the specified element type.
+QualType ASTContext::getComplexType(QualType T) {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ComplexType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(CT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getComplexType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ ComplexType *New = new (*this,8) ComplexType(T, Canonical);
+ Types.push_back(New);
+ ComplexTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getFixedWidthIntType(unsigned Width, bool Signed) {
+ llvm::DenseMap<unsigned, FixedWidthIntType*> &Map = Signed ?
+ SignedFixedWidthIntTypes : UnsignedFixedWidthIntTypes;
+ FixedWidthIntType *&Entry = Map[Width];
+ if (!Entry)
+ Entry = new FixedWidthIntType(Width, Signed);
+ return QualType(Entry, 0);
+}
+
+/// getPointerType - Return the uniqued reference to the type for a pointer to
+/// the specified type.
+QualType ASTContext::getPointerType(QualType T) {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ PointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ PointerType *New = new (*this,8) PointerType(T, Canonical);
+ Types.push_back(New);
+ PointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getBlockPointerType - Return the uniqued reference to the type for
+/// a pointer to the specified block.
+QualType ASTContext::getBlockPointerType(QualType T) {
+ assert(T->isFunctionType() && "block of function types only");
+ // Unique pointers, to guarantee there is only one block of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ BlockPointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (BlockPointerType *PT =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the block pointee type isn't canonical, this won't be a canonical
+ // type either so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getBlockPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ BlockPointerType *NewIP =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ BlockPointerType *New = new (*this,8) BlockPointerType(T, Canonical);
+ Types.push_back(New);
+ BlockPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getLValueReferenceType - Return the uniqued reference to the type for an
+/// lvalue reference to the specified type.
+QualType ASTContext::getLValueReferenceType(QualType T) {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (LValueReferenceType *RT =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getLValueReferenceType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ LValueReferenceType *NewIP =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ LValueReferenceType *New = new (*this,8) LValueReferenceType(T, Canonical);
+ Types.push_back(New);
+ LValueReferenceTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getRValueReferenceType - Return the uniqued reference to the type for an
+/// rvalue reference to the specified type.
+QualType ASTContext::getRValueReferenceType(QualType T) {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (RValueReferenceType *RT =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getRValueReferenceType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ RValueReferenceType *NewIP =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ RValueReferenceType *New = new (*this,8) RValueReferenceType(T, Canonical);
+ Types.push_back(New);
+ RValueReferenceTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getMemberPointerType - Return the uniqued reference to the type for a
+/// member pointer to the specified type, in the specified class.
+QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls)
+{
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ MemberPointerType::Profile(ID, T, Cls);
+
+ void *InsertPos = 0;
+ if (MemberPointerType *PT =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee or class type isn't canonical, this won't be a canonical
+ // type either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T->isCanonical()) {
+ Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
+
+ // Get the new insert position for the node we care about.
+ MemberPointerType *NewIP =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ MemberPointerType *New = new (*this,8) MemberPointerType(T, Cls, Canonical);
+ Types.push_back(New);
+ MemberPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getConstantArrayType - Return the unique reference to the type for an
+/// array of the specified element type.
+QualType ASTContext::getConstantArrayType(QualType EltTy,
+ const llvm::APInt &ArySizeIn,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned EltTypeQuals) {
+ assert((EltTy->isDependentType() || EltTy->isConstantSizeType()) &&
+ "Constant array of VLAs is illegal!");
+
+ // Convert the array size into a canonical width matching the pointer size for
+ // the target.
+ llvm::APInt ArySize(ArySizeIn);
+ ArySize.zextOrTrunc(Target.getPointerWidth(EltTy.getAddressSpace()));
+
+ llvm::FoldingSetNodeID ID;
+ ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, EltTypeQuals);
+
+ void *InsertPos = 0;
+ if (ConstantArrayType *ATP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(ATP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!EltTy->isCanonical()) {
+ Canonical = getConstantArrayType(getCanonicalType(EltTy), ArySize,
+ ASM, EltTypeQuals);
+ // Get the new insert position for the node we care about.
+ ConstantArrayType *NewIP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ ConstantArrayType *New =
+ new(*this,8)ConstantArrayType(EltTy, Canonical, ArySize, ASM, EltTypeQuals);
+ ConstantArrayTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getVariableArrayType - Returns a non-unique reference to the type for a
+/// variable array of the specified element type.
+QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned EltTypeQuals) {
+ // Since we don't unique expressions, it isn't possible to unique VLA's
+ // that have an expression provided for their size.
+
+ VariableArrayType *New =
+ new(*this,8)VariableArrayType(EltTy,QualType(), NumElts, ASM, EltTypeQuals);
+
+ VariableArrayTypes.push_back(New);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getDependentSizedArrayType - Returns a non-unique reference to
+/// the type for a dependently-sized array of the specified element
+/// type. FIXME: We will need these to be uniqued, or at least
+/// comparable, at some point.
+QualType ASTContext::getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned EltTypeQuals) {
+ assert((NumElts->isTypeDependent() || NumElts->isValueDependent()) &&
+ "Size must be type- or value-dependent!");
+
+ // Since we don't unique expressions, it isn't possible to unique
+ // dependently-sized array types.
+
+ DependentSizedArrayType *New =
+ new (*this,8) DependentSizedArrayType(EltTy, QualType(), NumElts,
+ ASM, EltTypeQuals);
+
+ DependentSizedArrayTypes.push_back(New);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType ASTContext::getIncompleteArrayType(QualType EltTy,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned EltTypeQuals) {
+ llvm::FoldingSetNodeID ID;
+ IncompleteArrayType::Profile(ID, EltTy, ASM, EltTypeQuals);
+
+ void *InsertPos = 0;
+ if (IncompleteArrayType *ATP =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(ATP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+
+ if (!EltTy->isCanonical()) {
+ Canonical = getIncompleteArrayType(getCanonicalType(EltTy),
+ ASM, EltTypeQuals);
+
+ // Get the new insert position for the node we care about.
+ IncompleteArrayType *NewIP =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ IncompleteArrayType *New = new (*this,8) IncompleteArrayType(EltTy, Canonical,
+ ASM, EltTypeQuals);
+
+ IncompleteArrayTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getVectorType - Return the unique reference to a vector type of
+/// the specified element type and size. The element type must be a built-in type.
+QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts) {
+ BuiltinType *baseType;
+
+ baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
+ assert(baseType != 0 && "getVectorType(): Expecting a built-in type");
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector);
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType->isCanonical()) {
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ VectorType *New = new (*this,8) VectorType(vecType, NumElts, Canonical);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getExtVectorType - Return the unique reference to an extended vector type of
+/// the specified element type and size. The element type must be a built-in type.
+QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
+ BuiltinType *baseType;
+
+ baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
+ assert(baseType != 0 && "getExtVectorType(): Expecting a built-in type");
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector);
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType->isCanonical()) {
+ Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+ ExtVectorType *New = new (*this,8) ExtVectorType(vecType, NumElts, Canonical);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
+///
+QualType ASTContext::getFunctionNoProtoType(QualType ResultTy) {
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionNoProtoType::Profile(ID, ResultTy);
+
+ void *InsertPos = 0;
+ if (FunctionNoProtoType *FT =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FT, 0);
+
+ QualType Canonical;
+ if (!ResultTy->isCanonical()) {
+ Canonical = getFunctionNoProtoType(getCanonicalType(ResultTy));
+
+ // Get the new insert position for the node we care about.
+ FunctionNoProtoType *NewIP =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ FunctionNoProtoType *New = new (*this, 8) FunctionNoProtoType(ResultTy, Canonical);
+ Types.push_back(New);
+ FunctionNoProtoTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
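+
+// Usage sketch (illustrative only): the K&R type 'int()', assuming an
+// ASTContext 'Ctx'; the result is uniqued, so repeated calls compare equal:
+//
+//   QualType KR = Ctx.getFunctionNoProtoType(Ctx.IntTy);
+//   assert(KR == Ctx.getFunctionNoProtoType(Ctx.IntTy));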
+
+/// getFunctionType - Return a normal function type with a typed argument
+/// list. isVariadic indicates whether the argument list includes '...'.
+QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
+ unsigned NumArgs, bool isVariadic,
+ unsigned TypeQuals, bool hasExceptionSpec,
+ bool hasAnyExceptionSpec, unsigned NumExs,
+ const QualType *ExArray) {
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, isVariadic,
+ TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
+ NumExs, ExArray);
+
+ void *InsertPos = 0;
+ if (FunctionProtoType *FTP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FTP, 0);
+
+ // Determine whether the type being created is already canonical or not.
+ bool isCanonical = ResultTy->isCanonical();
+ if (hasExceptionSpec)
+ isCanonical = false;
+ for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
+ if (!ArgArray[i]->isCanonical())
+ isCanonical = false;
+
+ // If this type isn't canonical, get the canonical version of it.
+ // The exception spec is not part of the canonical type.
+ QualType Canonical;
+ if (!isCanonical) {
+ llvm::SmallVector<QualType, 16> CanonicalArgs;
+ CanonicalArgs.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ CanonicalArgs.push_back(getCanonicalType(ArgArray[i]));
+
+ Canonical = getFunctionType(getCanonicalType(ResultTy),
+ CanonicalArgs.data(), NumArgs,
+ isVariadic, TypeQuals);
+
+ // Get the new insert position for the node we care about.
+ FunctionProtoType *NewIP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
+ }
+
+ // FunctionProtoType objects are allocated with extra bytes after them
+ // for two variable-size arrays (the parameter and exception types).
+ FunctionProtoType *FTP =
+ (FunctionProtoType*)Allocate(sizeof(FunctionProtoType) +
+ NumArgs*sizeof(QualType) +
+ NumExs*sizeof(QualType), 8);
+ new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, isVariadic,
+ TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
+ ExArray, NumExs, Canonical);
+ Types.push_back(FTP);
+ FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ return QualType(FTP, 0);
+}
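+
+// Usage sketch (illustrative only): building 'int (char *, double)' with no
+// exception specification, assuming an ASTContext 'Ctx' (the trailing
+// exception-spec parameters keep their defaults):
+//
+//   QualType Args[] = { Ctx.getPointerType(Ctx.CharTy), Ctx.DoubleTy };
+//   QualType FT = Ctx.getFunctionType(Ctx.IntTy, Args, 2,
+//                                     /*isVariadic=*/false, /*TypeQuals=*/0);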
+
+/// getTypeDeclType - Return the unique reference to the type for the
+/// specified type declaration.
+QualType ASTContext::getTypeDeclType(TypeDecl *Decl, TypeDecl* PrevDecl) {
+ assert(Decl && "Passed null for Decl param");
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (TypedefDecl *Typedef = dyn_cast<TypedefDecl>(Decl))
+ return getTypedefType(Typedef);
+ else if (isa<TemplateTypeParmDecl>(Decl)) {
+ assert(false && "Template type parameter types are always available.");
+ } else if (ObjCInterfaceDecl *ObjCInterface = dyn_cast<ObjCInterfaceDecl>(Decl))
+ return getObjCInterfaceType(ObjCInterface);
+
+ if (RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
+ if (PrevDecl)
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ else
+ Decl->TypeForDecl = new (*this,8) RecordType(Record);
+ }
+ else if (EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
+ if (PrevDecl)
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ else
+ Decl->TypeForDecl = new (*this,8) EnumType(Enum);
+ }
+ else
+ assert(false && "TypeDecl without a type?");
+
+ if (!PrevDecl) Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypedefType - Return the unique reference to the type for the
+/// specified typename decl.
+QualType ASTContext::getTypedefType(TypedefDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
+ Decl->TypeForDecl = new(*this,8) TypedefType(Type::Typedef, Decl, Canonical);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getObjCInterfaceType - Return the unique reference to the type for the
+/// specified ObjC interface decl.
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ ObjCInterfaceDecl *OID = const_cast<ObjCInterfaceDecl*>(Decl);
+ Decl->TypeForDecl = new(*this,8) ObjCInterfaceType(Type::ObjCInterface, OID);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// \brief Retrieve the template type parameter type for a template
+/// parameter with the given depth, index, and (optionally) name.
+QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
+ IdentifierInfo *Name) {
+ llvm::FoldingSetNodeID ID;
+ TemplateTypeParmType::Profile(ID, Depth, Index, Name);
+ void *InsertPos = 0;
+ TemplateTypeParmType *TypeParm
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (TypeParm)
+ return QualType(TypeParm, 0);
+
+ if (Name)
+ TypeParm = new (*this, 8) TemplateTypeParmType(Depth, Index, Name,
+ getTemplateTypeParmType(Depth, Index));
+ else
+ TypeParm = new (*this, 8) TemplateTypeParmType(Depth, Index);
+
+ Types.push_back(TypeParm);
+ TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
+
+ return QualType(TypeParm, 0);
+}
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ QualType Canon) {
+ if (!Canon.isNull())
+ Canon = getCanonicalType(Canon);
+
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, Template, Args, NumArgs);
+
+ void *InsertPos = 0;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (Spec)
+ return QualType(Spec, 0);
+
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ 8);
+ Spec = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, Canon);
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getQualifiedNameType(NestedNameSpecifier *NNS,
+ QualType NamedType) {
+ llvm::FoldingSetNodeID ID;
+ QualifiedNameType::Profile(ID, NNS, NamedType);
+
+ void *InsertPos = 0;
+ QualifiedNameType *T
+ = QualifiedNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this) QualifiedNameType(NNS, NamedType,
+ getCanonicalType(NamedType));
+ Types.push_back(T);
+ QualifiedNameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getTypenameType(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon) {
+ assert(NNS->isDependent() && "nested-name-specifier must be dependent");
+
+ if (Canon.isNull()) {
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS != NNS)
+ Canon = getTypenameType(CanonNNS, Name);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ TypenameType::Profile(ID, NNS, Name);
+
+ void *InsertPos = 0;
+ TypenameType *T
+ = TypenameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this) TypenameType(NNS, Name, Canon);
+ Types.push_back(T);
+ TypenameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getTypenameType(NestedNameSpecifier *NNS,
+ const TemplateSpecializationType *TemplateId,
+ QualType Canon) {
+ assert(NNS->isDependent() && "nested-name-specifier must be dependent");
+
+ if (Canon.isNull()) {
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ QualType CanonType = getCanonicalType(QualType(TemplateId, 0));
+ if (CanonNNS != NNS || CanonType != QualType(TemplateId, 0)) {
+ const TemplateSpecializationType *CanonTemplateId
+ = CanonType->getAsTemplateSpecializationType();
+ assert(CanonTemplateId &&
+ "Canonical type must also be a template specialization type");
+ Canon = getTypenameType(CanonNNS, CanonTemplateId);
+ }
+ }
+
+ llvm::FoldingSetNodeID ID;
+ TypenameType::Profile(ID, NNS, TemplateId);
+
+ void *InsertPos = 0;
+ TypenameType *T
+ = TypenameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this) TypenameType(NNS, TemplateId, Canon);
+ Types.push_back(T);
+ TypenameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// CmpProtocolNames - Comparison predicate for sorting protocols
+/// alphabetically.
+static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
+ const ObjCProtocolDecl *RHS) {
+ return LHS->getDeclName() < RHS->getDeclName();
+}
+
+static void SortAndUniqueProtocols(ObjCProtocolDecl **&Protocols,
+ unsigned &NumProtocols) {
+ ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;
+
+ // Sort protocols, keyed by name.
+ std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);
+
+ // Remove duplicates.
+ ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
+ NumProtocols = ProtocolsEnd-Protocols;
+}
+
+
+/// getObjCQualifiedInterfaceType - Return a ObjCQualifiedInterfaceType type for
+/// the given interface decl and the conforming protocol list.
+QualType ASTContext::getObjCQualifiedInterfaceType(ObjCInterfaceDecl *Decl,
+ ObjCProtocolDecl **Protocols, unsigned NumProtocols) {
+ // Sort the protocol list alphabetically to canonicalize it.
+ SortAndUniqueProtocols(Protocols, NumProtocols);
+
+ llvm::FoldingSetNodeID ID;
+ ObjCQualifiedInterfaceType::Profile(ID, Decl, Protocols, NumProtocols);
+
+ void *InsertPos = 0;
+ if (ObjCQualifiedInterfaceType *QT =
+ ObjCQualifiedInterfaceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // No match.
+ ObjCQualifiedInterfaceType *QType =
+ new (*this,8) ObjCQualifiedInterfaceType(Decl, Protocols, NumProtocols);
+
+ Types.push_back(QType);
+ ObjCQualifiedInterfaceTypes.InsertNode(QType, InsertPos);
+ return QualType(QType, 0);
+}
+
+/// getObjCQualifiedIdType - Return an ObjCQualifiedIdType for the 'id' decl
+/// and the conforming protocol list.
+QualType ASTContext::getObjCQualifiedIdType(ObjCProtocolDecl **Protocols,
+ unsigned NumProtocols) {
+ // Sort the protocol list alphabetically to canonicalize it.
+ SortAndUniqueProtocols(Protocols, NumProtocols);
+
+ llvm::FoldingSetNodeID ID;
+ ObjCQualifiedIdType::Profile(ID, Protocols, NumProtocols);
+
+ void *InsertPos = 0;
+ if (ObjCQualifiedIdType *QT =
+ ObjCQualifiedIdTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // No match.
+ ObjCQualifiedIdType *QType =
+ new (*this,8) ObjCQualifiedIdType(Protocols, NumProtocols);
+ Types.push_back(QType);
+ ObjCQualifiedIdTypes.InsertNode(QType, InsertPos);
+ return QualType(QType, 0);
+}
+
+/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
+/// TypeOfExprType ASTs (since expressions are never shared). For example,
+/// multiple declarations that refer to "typeof(x)" all contain different
+/// DeclRefExprs. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr) {
+ QualType Canonical = getCanonicalType(tofExpr->getType());
+ TypeOfExprType *toe = new (*this,8) TypeOfExprType(tofExpr, Canonical);
+ Types.push_back(toe);
+ return QualType(toe, 0);
+}
+
+/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
+/// TypeOfType ASTs. The only motivation to unique these nodes would be
+/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfType(QualType tofType) {
+ QualType Canonical = getCanonicalType(tofType);
+ TypeOfType *tot = new (*this,8) TypeOfType(tofType, Canonical);
+ Types.push_back(tot);
+ return QualType(tot, 0);
+}
+
+/// getTagDeclType - Return the unique reference to the type for the
+/// specified TagDecl (struct/union/class/enum) decl.
+QualType ASTContext::getTagDeclType(TagDecl *Decl) {
+ assert (Decl);
+ return getTypeDeclType(Decl);
+}
+
+/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
+/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
+/// needs to agree with the definition in <stddef.h>.
+QualType ASTContext::getSizeType() const {
+ return getFromTargetType(Target.getSizeType());
+}
+
+/// getSignedWCharType - Return the type of "signed wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getSignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return WCharTy;
+}
+
+/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getUnsignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return UnsignedIntTy;
+}
+
+/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17),
+/// defined in <stddef.h>. Pointer subtraction requires this (C99 6.5.6p9).
+QualType ASTContext::getPointerDiffType() const {
+ return getFromTargetType(Target.getPtrDiffType(0));
+}
+
+//===----------------------------------------------------------------------===//
+// Type Operators
+//===----------------------------------------------------------------------===//
+
+/// getCanonicalType - Return the canonical (structural) type corresponding to
+/// the specified potentially non-canonical type. A non-canonical type may
+/// come in many "decorated" (sugared) forms. Decorators can
+/// include typedefs, 'typeof' operators, etc. The returned type is guaranteed
+/// to be free of any of these, allowing two canonical types to be compared
+/// for exact equality with a simple pointer comparison.
+QualType ASTContext::getCanonicalType(QualType T) {
+ QualType CanType = T.getTypePtr()->getCanonicalTypeInternal();
+
+ // If the result has type qualifiers, make sure to canonicalize them as well.
+ unsigned TypeQuals = T.getCVRQualifiers() | CanType.getCVRQualifiers();
+ if (TypeQuals == 0) return CanType;
+
+ // If the type qualifiers are on an array type, get the canonical type of the
+ // array with the qualifiers applied to the element type.
+ ArrayType *AT = dyn_cast<ArrayType>(CanType);
+ if (!AT)
+ return CanType.getQualifiedType(TypeQuals);
+
+ // Get the canonical version of the element with the extra qualifiers on it.
+ // This can recursively sink qualifiers through multiple levels of arrays.
+ QualType NewEltTy=AT->getElementType().getWithAdditionalQualifiers(TypeQuals);
+ NewEltTy = getCanonicalType(NewEltTy);
+
+ if (ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ return getConstantArrayType(NewEltTy, CAT->getSize(),CAT->getSizeModifier(),
+ CAT->getIndexTypeQualifier());
+ if (IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT))
+ return getIncompleteArrayType(NewEltTy, IAT->getSizeModifier(),
+ IAT->getIndexTypeQualifier());
+
+ if (DependentSizedArrayType *DSAT = dyn_cast<DependentSizedArrayType>(AT))
+ return getDependentSizedArrayType(NewEltTy, DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(),
+ DSAT->getIndexTypeQualifier());
+
+ VariableArrayType *VAT = cast<VariableArrayType>(AT);
+ return getVariableArrayType(NewEltTy, VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeQualifier());
+}
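+
+// Usage sketch (illustrative only): for two hypothetical QualTypes 'T1' and
+// 'T2' that may carry sugar (typedefs, typeof, etc.), structural equality
+// reduces to comparing their canonical forms:
+//
+//   bool StructurallySame =
+//     Ctx.getCanonicalType(T1) == Ctx.getCanonicalType(T2);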
+
+Decl *ASTContext::getCanonicalDecl(Decl *D) {
+ if (!D)
+ return 0;
+
+ if (TagDecl *Tag = dyn_cast<TagDecl>(D)) {
+ QualType T = getTagDeclType(Tag);
+ return cast<TagDecl>(cast<TagType>(T.getTypePtr()->CanonicalType)
+ ->getDecl());
+ }
+
+ if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(D)) {
+ while (Template->getPreviousDeclaration())
+ Template = Template->getPreviousDeclaration();
+ return Template;
+ }
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ while (Function->getPreviousDeclaration())
+ Function = Function->getPreviousDeclaration();
+ return const_cast<FunctionDecl *>(Function);
+ }
+
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ while (Var->getPreviousDeclaration())
+ Var = Var->getPreviousDeclaration();
+ return const_cast<VarDecl *>(Var);
+ }
+
+ return D;
+}
+
+TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) {
+ // If this template name refers to a template, the canonical
+ // template name merely stores the template itself.
+ if (TemplateDecl *Template = Name.getAsTemplateDecl())
+ return TemplateName(cast<TemplateDecl>(getCanonicalDecl(Template)));
+
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ assert(DTN && "Non-dependent template names must refer to template decls.");
+ return DTN->CanonicalTemplateName;
+}
+
+NestedNameSpecifier *
+ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ if (!NNS)
+ return 0;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ // Canonicalize the prefix but keep the identifier the same.
+ return NestedNameSpecifier::Create(*this,
+ getCanonicalNestedNameSpecifier(NNS->getPrefix()),
+ NNS->getAsIdentifier());
+
+ case NestedNameSpecifier::Namespace:
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ return NestedNameSpecifier::Create(*this, 0, NNS->getAsNamespace());
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
+ NestedNameSpecifier *Prefix = 0;
+
+ // FIXME: This isn't the right check!
+ if (T->isDependentType())
+ Prefix = getCanonicalNestedNameSpecifier(NNS->getPrefix());
+
+ return NestedNameSpecifier::Create(*this, Prefix,
+ NNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate,
+ T.getTypePtr());
+ }
+
+ case NestedNameSpecifier::Global:
+ // The global specifier is canonical and unique.
+ return NNS;
+ }
+
+ // Required to silence a GCC warning
+ return 0;
+}
+
+
+const ArrayType *ASTContext::getAsArrayType(QualType T) {
+ // Handle the non-qualified case efficiently.
+ if (T.getCVRQualifiers() == 0) {
+ // Handle the common positive case fast.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return AT;
+ }
+
+ // Handle the common negative case fast, ignoring CVR qualifiers.
+ QualType CType = T->getCanonicalTypeInternal();
+
+ // Make sure to look through type qualifiers (like ExtQuals) for the negative
+ // test.
+ if (!isa<ArrayType>(CType) &&
+ !isa<ArrayType>(CType.getUnqualifiedType()))
+ return 0;
+
+ // Apply any CVR qualifiers from the array type to the element type. This
+ // implements C99 6.7.3p8: "If the specification of an array type includes
+ // any type qualifiers, the element type is so qualified, not the array type."
+
+ // If we get here, we either have type qualifiers on the type, or we have
+ // sugar such as a typedef in the way. If we have type qualifiers on the
+ // type, we must propagate them down into the element type.
+ unsigned CVRQuals = T.getCVRQualifiers();
+ unsigned AddrSpace = 0;
+ Type *Ty = T.getTypePtr();
+
+ // Rip through ExtQualType's and typedefs to get to a concrete type.
+ while (1) {
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(Ty)) {
+ AddrSpace = EXTQT->getAddressSpace();
+ Ty = EXTQT->getBaseType();
+ } else {
+ T = Ty->getDesugaredType();
+ if (T.getTypePtr() == Ty && T.getCVRQualifiers() == 0)
+ break;
+ CVRQuals |= T.getCVRQualifiers();
+ Ty = T.getTypePtr();
+ }
+ }
+
+ // If we have a simple case, just return now.
+ const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
+ if (ATy == 0 || (AddrSpace == 0 && CVRQuals == 0))
+ return ATy;
+
+ // Otherwise, we have an array and we have qualifiers on it. Push the
+ // qualifiers into the array element type and return a new array type.
+ // Get the canonical version of the element with the extra qualifiers on it.
+ // This can recursively sink qualifiers through multiple levels of arrays.
+ QualType NewEltTy = ATy->getElementType();
+ if (AddrSpace)
+ NewEltTy = getAddrSpaceQualType(NewEltTy, AddrSpace);
+ NewEltTy = NewEltTy.getWithAdditionalQualifiers(CVRQuals);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
+ return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeQualifier()));
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
+ return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
+ IAT->getSizeModifier(),
+ IAT->getIndexTypeQualifier()));
+
+ if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(ATy))
+ return cast<ArrayType>(
+ getDependentSizedArrayType(NewEltTy,
+ DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(),
+ DSAT->getIndexTypeQualifier()));
+
+ const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
+ return cast<ArrayType>(getVariableArrayType(NewEltTy, VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeQualifier()));
+}
+
+
+/// getArrayDecayedType - Return the properly qualified result of decaying the
+/// specified array type to a pointer. This operation is non-trivial when
+/// handling typedefs etc. The canonical type of "T" must be an array type;
+/// this returns a pointer to a properly qualified element of the array.
+///
+/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
+QualType ASTContext::getArrayDecayedType(QualType Ty) {
+ // Get the element type with 'getAsArrayType' so that we don't lose any
+ // typedefs in the element type of the array. This also handles propagation
+ // of type qualifiers from the array type into the element type if present
+ // (C99 6.7.3p8).
+ const ArrayType *PrettyArrayType = getAsArrayType(Ty);
+ assert(PrettyArrayType && "Not an array type!");
+
+ QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
+
+ // int x[restrict 4] -> int *restrict
+ return PtrTy.getQualifiedType(PrettyArrayType->getIndexTypeQualifier());
+}
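+
+// Usage sketch (illustrative only): 'int[10]' decays to 'int *', assuming an
+// ASTContext 'Ctx':
+//
+//   QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 10),
+//                                           ArrayType::Normal, 0);
+//   QualType Ptr = Ctx.getArrayDecayedType(Arr);  // canonically 'int *'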
+
+QualType ASTContext::getBaseElementType(const VariableArrayType *VAT) {
+ QualType ElemTy = VAT->getElementType();
+
+ if (const VariableArrayType *VAT = getAsVariableArrayType(ElemTy))
+ return getBaseElementType(VAT);
+
+ return ElemTy;
+}
+
+/// getFloatingRank - Return a relative rank for floating point types.
+/// This routine will assert if passed a built-in type that isn't a floating type.
+static FloatingRank getFloatingRank(QualType T) {
+ if (const ComplexType *CT = T->getAsComplexType())
+ return getFloatingRank(CT->getElementType());
+
+ assert(T->getAsBuiltinType() && "getFloatingRank(): not a floating type");
+ switch (T->getAsBuiltinType()->getKind()) {
+ default: assert(0 && "getFloatingRank(): not a floating type");
+ case BuiltinType::Float: return FloatRank;
+ case BuiltinType::Double: return DoubleRank;
+ case BuiltinType::LongDouble: return LongDoubleRank;
+ }
+}
+
+/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
+/// point or a complex type (based on 'Domain' and 'Size').
+/// 'Domain' is a real floating point or complex type.
+/// 'Size' is a real floating point or complex type.
+QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
+ QualType Domain) const {
+ FloatingRank EltRank = getFloatingRank(Size);
+ if (Domain->isComplexType()) {
+ switch (EltRank) {
+ default: assert(0 && "getFloatingRank(): illegal value for rank");
+ case FloatRank: return FloatComplexTy;
+ case DoubleRank: return DoubleComplexTy;
+ case LongDoubleRank: return LongDoubleComplexTy;
+ }
+ }
+
+ assert(Domain->isRealFloatingType() && "Unknown domain!");
+ switch (EltRank) {
+ default: assert(0 && "getFloatingRank(): illegal value for rank");
+ case FloatRank: return FloatTy;
+ case DoubleRank: return DoubleTy;
+ case LongDoubleRank: return LongDoubleTy;
+ }
+}
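+
+// Usage sketch (illustrative only): a 'float'-sized result placed in a
+// complex domain yields '_Complex float':
+//
+//   QualType T = Ctx.getFloatingTypeOfSizeWithinDomain(Ctx.FloatTy,
+//                                                      Ctx.DoubleComplexTy);
+//   // T is Ctx.FloatComplexTy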
+
+/// getFloatingTypeOrder - Compare the rank of the two specified floating
+/// point types, ignoring the domain of the type (i.e. 'double' ==
+/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) {
+ FloatingRank LHSR = getFloatingRank(LHS);
+ FloatingRank RHSR = getFloatingRank(RHS);
+
+ if (LHSR == RHSR)
+ return 0;
+ if (LHSR > RHSR)
+ return 1;
+ return -1;
+}
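+
+// Usage sketch (illustrative only): 'double' outranks 'float', and the
+// complex domain is ignored when comparing ranks:
+//
+//   assert(Ctx.getFloatingTypeOrder(Ctx.DoubleTy, Ctx.FloatTy) == 1);
+//   assert(Ctx.getFloatingTypeOrder(Ctx.FloatTy, Ctx.FloatComplexTy) == 0);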
+
+/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
+/// routine will assert if passed a built-in type that isn't an integer or enum,
+/// or if it is not canonicalized.
+unsigned ASTContext::getIntegerRank(Type *T) {
+ assert(T->isCanonical() && "T should be canonicalized");
+ if (EnumType* ET = dyn_cast<EnumType>(T))
+ T = ET->getDecl()->getIntegerType().getTypePtr();
+
+ // There are two things which impact the integer rank: the width, and
+ // the ordering of builtins. The builtin ordering is encoded in the
+ // bottom three bits; the width is encoded in the bits above that.
+ if (FixedWidthIntType* FWIT = dyn_cast<FixedWidthIntType>(T)) {
+ return FWIT->getWidth() << 3;
+ }
+
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: assert(0 && "getIntegerRank(): not a built-in integer");
+ case BuiltinType::Bool:
+ return 1 + (getIntWidth(BoolTy) << 3);
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ return 2 + (getIntWidth(CharTy) << 3);
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return 3 + (getIntWidth(ShortTy) << 3);
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return 4 + (getIntWidth(IntTy) << 3);
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ return 5 + (getIntWidth(LongTy) << 3);
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ return 6 + (getIntWidth(LongLongTy) << 3);
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return 7 + (getIntWidth(Int128Ty) << 3);
+ }
+}
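+
+// Worked example (assuming an 8-bit '_Bool' and a 32-bit 'int'): _Bool gets
+// rank 1 + (8 << 3) = 65 while int gets 4 + (32 << 3) = 260, so the width in
+// the high bits dominates and 'int' outranks '_Bool' as C99 6.3.1.1p1
+// requires; the low three bits only break ties between same-width builtins.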
+
+/// getIntegerTypeOrder - Compare the ranks of the two specified integer
+/// types per C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0.
+/// If LHS < RHS, return -1.
+int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) {
+ Type *LHSC = getCanonicalType(LHS).getTypePtr();
+ Type *RHSC = getCanonicalType(RHS).getTypePtr();
+ if (LHSC == RHSC) return 0;
+
+ bool LHSUnsigned = LHSC->isUnsignedIntegerType();
+ bool RHSUnsigned = RHSC->isUnsignedIntegerType();
+
+ unsigned LHSRank = getIntegerRank(LHSC);
+ unsigned RHSRank = getIntegerRank(RHSC);
+
+ if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
+ if (LHSRank == RHSRank) return 0;
+ return LHSRank > RHSRank ? 1 : -1;
+ }
+
+ // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
+ if (LHSUnsigned) {
+ // If the unsigned [LHS] type is larger, it wins.
+ if (LHSRank >= RHSRank)
+ return 1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return -1;
+ }
+
+ // If the unsigned [RHS] type is larger, it wins.
+ if (RHSRank >= LHSRank)
+ return -1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return 1;
+}
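+
+// Usage sketch (illustrative only), matching the usual arithmetic
+// conversions for an ASTContext 'Ctx':
+//
+//   assert(Ctx.getIntegerTypeOrder(Ctx.IntTy, Ctx.UnsignedIntTy) == -1);
+//   assert(Ctx.getIntegerTypeOrder(Ctx.LongLongTy, Ctx.IntTy) == 1);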
+
+// getCFConstantStringType - Return the type used for constant CFStrings.
+QualType ASTContext::getCFConstantStringType() {
+ if (!CFConstantStringTypeDecl) {
+ CFConstantStringTypeDecl =
+ RecordDecl::Create(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ &Idents.get("NSConstantString"));
+ QualType FieldTypes[4];
+
+ // const int *isa;
+ FieldTypes[0] = getPointerType(IntTy.getQualifiedType(QualType::Const));
+ // int flags;
+ FieldTypes[1] = IntTy;
+ // const char *str;
+ FieldTypes[2] = getPointerType(CharTy.getQualifiedType(QualType::Const));
+ // long length;
+ FieldTypes[3] = LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
+ SourceLocation(), 0,
+ FieldTypes[i], /*BitWidth=*/0,
+ /*Mutable=*/false);
+ CFConstantStringTypeDecl->addDecl(*this, Field);
+ }
+
+ CFConstantStringTypeDecl->completeDefinition(*this);
+ }
+
+ return getTagDeclType(CFConstantStringTypeDecl);
+}
+
+void ASTContext::setCFConstantStringType(QualType T) {
+ const RecordType *Rec = T->getAsRecordType();
+ assert(Rec && "Invalid CFConstantStringType");
+ CFConstantStringTypeDecl = Rec->getDecl();
+}
+
+QualType ASTContext::getObjCFastEnumerationStateType() {
+ if (!ObjCFastEnumerationStateTypeDecl) {
+ ObjCFastEnumerationStateTypeDecl =
+ RecordDecl::Create(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ &Idents.get("__objcFastEnumerationState"));
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ getPointerType(ObjCIdType),
+ getPointerType(UnsignedLongTy),
+ getConstantArrayType(UnsignedLongTy,
+ llvm::APInt(32, 5), ArrayType::Normal, 0)
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this,
+ ObjCFastEnumerationStateTypeDecl,
+ SourceLocation(), 0,
+ FieldTypes[i], /*BitWidth=*/0,
+ /*Mutable=*/false);
+ ObjCFastEnumerationStateTypeDecl->addDecl(*this, Field);
+ }
+
+ ObjCFastEnumerationStateTypeDecl->completeDefinition(*this);
+ }
+
+ return getTagDeclType(ObjCFastEnumerationStateTypeDecl);
+}
+
+void ASTContext::setObjCFastEnumerationStateType(QualType T) {
+ const RecordType *Rec = T->getAsRecordType();
+ assert(Rec && "Invalid ObjCFastEnumerationStateType");
+ ObjCFastEnumerationStateTypeDecl = Rec->getDecl();
+}
+
+// This returns true if a type has been typedefed to BOOL:
+// typedef <type> BOOL;
+static bool isTypeTypedefedAsBOOL(QualType T) {
+ if (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
+ return II->isStr("BOOL");
+
+ return false;
+}
+
+/// getObjCEncodingTypeSize - Return the size of the given type, in chars,
+/// for Objective-C type-encoding purposes.
+int ASTContext::getObjCEncodingTypeSize(QualType type) {
+ uint64_t sz = getTypeSize(type);
+
+ // Make all integer and enum types at least as large as an int
+ if (sz > 0 && type->isIntegralType())
+ sz = std::max(sz, getTypeSize(IntTy));
+ // Treat arrays as pointers, since that's how they're passed in.
+ else if (type->isArrayType())
+ sz = getTypeSize(VoidPtrTy);
+ return sz / getTypeSize(CharTy);
+}
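+
+// Usage sketch (illustrative only, assuming a 32-bit 'int' and 32-bit
+// pointers):
+//
+//   Ctx.getObjCEncodingTypeSize(Ctx.IntTy);                       // 4
+//   Ctx.getObjCEncodingTypeSize(Ctx.ShortTy);                     // 4 (promoted)
+//   Ctx.getObjCEncodingTypeSize(Ctx.getPointerType(Ctx.CharTy));  // 4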
+
+/// getObjCEncodingForMethodDecl - Return the encoded type for this method
+/// declaration.
+void ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
+ std::string& S) {
+ // FIXME: This is not very efficient.
+ // Encode type qualifiers ('in', 'inout', etc.) for the return type.
+ getObjCEncodingForTypeQualifier(Decl->getObjCDeclQualifier(), S);
+ // Encode result type.
+ getObjCEncodingForType(Decl->getResultType(), S);
+ // Compute the size of all parameters.
+ // Start by computing the size of a pointer in bytes.
+ // FIXME: There should be a better way of doing this computation.
+ int PtrSize = getTypeSize(VoidPtrTy) / getTypeSize(CharTy);
+ // The first two arguments (self and _cmd) are pointers; account for
+ // their size.
+ int ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ int sz = getObjCEncodingTypeSize(PType);
+ assert (sz > 0 && "getObjCEncodingForMethodDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += llvm::utostr(ParmOffset);
+ S += "@0:";
+ S += llvm::utostr(PtrSize);
+
+ // Argument types.
+ ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use array's original type only if it has known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ // Process argument qualifiers for user-supplied arguments, such as
+ // 'in', 'inout', etc.
+ getObjCEncodingForTypeQualifier(PVDecl->getObjCDeclQualifier(), S);
+ getObjCEncodingForType(PType, S);
+ S += llvm::utostr(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+}
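+
+// Worked example (assuming 4-byte pointers): a hypothetical method
+// '- (int)lengthOfName:(char *)name' encodes as "i12@0:4*8" -- return type
+// 'i', 12 bytes of arguments in total, then self ('@') at offset 0,
+// _cmd (':') at offset 4, and the 'char *' parameter ('*') at offset 8.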
+
+/// getObjCEncodingForPropertyDecl - Return the encoded type for this
+/// property declaration. If non-NULL, Container must be either an
+/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
+/// NULL when getting encodings for protocol properties.
+/// Property attributes are stored as a comma-delimited C string. The simple
+/// attributes readonly and bycopy are encoded as single characters. The
+/// parametrized attributes, getter=name, setter=name, and ivar=name, are
+/// encoded as single characters, followed by an identifier. Property types
+/// are also encoded as a parametrized attribute. The characters used to encode
+/// these attributes are defined by the following enumeration:
+/// @code
+/// enum PropertyAttributes {
+/// kPropertyReadOnly = 'R', // property is read-only.
+/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
+/// kPropertyByref = '&', // property is a reference to the value last assigned
+/// kPropertyDynamic = 'D', // property is dynamic
+/// kPropertyGetter = 'G', // followed by getter selector name
+/// kPropertySetter = 'S', // followed by setter selector name
+/// kPropertyInstanceVariable = 'V' // followed by instance variable name
+/// kPropertyType = 't' // followed by old-style type encoding.
+/// kPropertyWeak = 'W' // 'weak' property
+/// kPropertyStrong = 'P' // property GC'able
+/// kPropertyNonAtomic = 'N' // property non-atomic
+/// };
+/// @endcode
+void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container,
+ std::string& S) {
+ // Collect information from the property implementation decl(s).
+ bool Dynamic = false;
+ ObjCPropertyImplDecl *SynthesizePID = 0;
+
+ // FIXME: Duplicated code due to poor abstraction.
+ if (Container) {
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = CID->propimpl_begin(*this), e = CID->propimpl_end(*this);
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ } else {
+ const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = OID->propimpl_begin(*this), e = OID->propimpl_end(*this);
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ }
+ }
+
+ // FIXME: This is not very efficient.
+ S = "T";
+
+ // Encode result type.
+ // GCC has some special rules regarding encoding of properties which
+ // closely resembles encoding of ivars.
+ getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0,
+ true /* outermost type */,
+ true /* encoding for property */);
+
+ if (PD->isReadOnly()) {
+ S += ",R";
+ } else {
+ switch (PD->getSetterKind()) {
+ case ObjCPropertyDecl::Assign: break;
+ case ObjCPropertyDecl::Copy: S += ",C"; break;
+ case ObjCPropertyDecl::Retain: S += ",&"; break;
+ }
+ }
+
+ // It really isn't clear at all what this means, since properties
+ // are "dynamic by default".
+ if (Dynamic)
+ S += ",D";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ S += ",N";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ S += ",G";
+ S += PD->getGetterName().getAsString();
+ }
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ S += ",S";
+ S += PD->getSetterName().getAsString();
+ }
+
+ if (SynthesizePID) {
+ const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
+ S += ",V";
+ S += OID->getNameAsString();
+ }
+
+ // FIXME: OBJCGC: weak & strong
+}
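+
+// Worked example (illustrative only): a hypothetical property
+// '@property(nonatomic, retain) NSString *name;' synthesized onto an ivar
+// '_name' would come out roughly as:
+//
+//   T@"NSString",&,N,V_name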
+
+/// getLegacyIntegralTypeEncoding -
+/// Another legacy compatibility encoding: 32-bit longs are encoded as
+/// 'l' or 'L', but not always. For typedefs, we need to use
+/// 'i' or 'I' instead when encoding a struct field or a pointer.
+///
+void ASTContext::getLegacyIntegralTypeEncoding(QualType &PointeeTy) const {
+ if (dyn_cast<TypedefType>(PointeeTy.getTypePtr())) {
+ if (const BuiltinType *BT = PointeeTy->getAsBuiltinType()) {
+ if (BT->getKind() == BuiltinType::ULong &&
+ ((const_cast<ASTContext *>(this))->getIntWidth(PointeeTy) == 32))
+ PointeeTy = UnsignedIntTy;
+ else
+ if (BT->getKind() == BuiltinType::Long &&
+ ((const_cast<ASTContext *>(this))->getIntWidth(PointeeTy) == 32))
+ PointeeTy = IntTy;
+ }
+ }
+}
+
+void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
+ const FieldDecl *Field) {
+ // We follow the behavior of gcc, expanding structures which are
+ // directly pointed to, and expanding embedded structures. Note that
+ // these rules are sufficient to prevent recursive encoding of the
+ // same type.
+ getObjCEncodingForTypeImpl(T, S, true, true, Field,
+ true /* outermost type */);
+}
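+
+// A few representative encodings this entry point produces (illustrative;
+// the struct and field names are made up):
+//
+//   int                          -> "i"
+//   char *                       -> "*"
+//   float[5]                     -> "[5f]"
+//   struct Point { int x, y; }   -> "{Point=ii}"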
+
+static void EncodeBitField(const ASTContext *Context, std::string& S,
+ const FieldDecl *FD) {
+ const Expr *E = FD->getBitWidth();
+ assert(E && "bitfield width not there - getObjCEncodingForTypeImpl");
+ ASTContext *Ctx = const_cast<ASTContext*>(Context);
+ unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
+ S += 'b';
+ S += llvm::utostr(N);
+}
+
+void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
+ bool ExpandPointedToStructures,
+ bool ExpandStructures,
+ const FieldDecl *FD,
+ bool OutermostType,
+ bool EncodingProperty) {
+ if (const BuiltinType *BT = T->getAsBuiltinType()) {
+ if (FD && FD->isBitField()) {
+ EncodeBitField(this, S, FD);
+ }
+ else {
+ char encoding;
+ switch (BT->getKind()) {
+ default: assert(0 && "Unhandled builtin type kind");
+ case BuiltinType::Void: encoding = 'v'; break;
+ case BuiltinType::Bool: encoding = 'B'; break;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: encoding = 'C'; break;
+ case BuiltinType::UShort: encoding = 'S'; break;
+ case BuiltinType::UInt: encoding = 'I'; break;
+ case BuiltinType::ULong:
+ encoding =
+ (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'L' : 'Q';
+ break;
+ case BuiltinType::UInt128: encoding = 'T'; break;
+ case BuiltinType::ULongLong: encoding = 'Q'; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: encoding = 'c'; break;
+ case BuiltinType::Short: encoding = 's'; break;
+ case BuiltinType::Int: encoding = 'i'; break;
+ case BuiltinType::Long:
+ encoding =
+ (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'l' : 'q';
+ break;
+ case BuiltinType::LongLong: encoding = 'q'; break;
+ case BuiltinType::Int128: encoding = 't'; break;
+ case BuiltinType::Float: encoding = 'f'; break;
+ case BuiltinType::Double: encoding = 'd'; break;
+ case BuiltinType::LongDouble: encoding = 'd'; break;
+ }
+
+ S += encoding;
+ }
+ } else if (const ComplexType *CT = T->getAsComplexType()) {
+ S += 'j';
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false,
+ false);
+ } else if (T->isObjCQualifiedIdType()) {
+ getObjCEncodingForTypeImpl(getObjCIdType(), S,
+ ExpandPointedToStructures,
+ ExpandStructures, FD);
+ if (FD || EncodingProperty) {
+ // Note that we do the extended encoding of the protocol qualifier list
+ // only when doing ivar or property encoding.
+ const ObjCQualifiedIdType *QIDT = T->getAsObjCQualifiedIdType();
+ S += '"';
+ for (ObjCQualifiedIdType::qual_iterator I = QIDT->qual_begin(),
+ E = QIDT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+ else if (const PointerType *PT = T->getAsPointerType()) {
+ QualType PointeeTy = PT->getPointeeType();
+ bool isReadOnly = false;
+ // For historical/compatibility reasons, the read-only qualifier of the
+ // pointee gets emitted _before_ the '^'. The read-only qualifier of
+ // the pointer itself gets ignored, _unless_ we are looking at a typedef!
+ // Also, do not emit the 'r' for anything but the outermost type!
+ if (dyn_cast<TypedefType>(T.getTypePtr())) {
+ if (OutermostType && T.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ }
+ else if (OutermostType) {
+ QualType P = PointeeTy;
+ while (P->getAsPointerType())
+ P = P->getAsPointerType()->getPointeeType();
+ if (P.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ }
+ if (isReadOnly) {
+ // Another legacy compatibility encoding. Some ObjC qualifier and type
+ // combinations need to be rearranged.
+ // Rewrite "in const" from "nr" to "rn"
+ const char * s = S.c_str();
+ int len = S.length();
+ if (len >= 2 && s[len-2] == 'n' && s[len-1] == 'r') {
+ std::string replace = "rn";
+ S.replace(S.end()-2, S.end(), replace);
+ }
+ }
+ if (isObjCIdStructType(PointeeTy)) {
+ S += '@';
+ return;
+ }
+ else if (PointeeTy->isObjCInterfaceType()) {
+ if (!EncodingProperty &&
+ isa<TypedefType>(PointeeTy.getTypePtr())) {
+ // Another historical/compatibility reason.
+ // We encode the underlying type which comes out as
+ // {...};
+ S += '^';
+ getObjCEncodingForTypeImpl(PointeeTy, S,
+ false, ExpandPointedToStructures,
+ NULL);
+ return;
+ }
+ S += '@';
+ if (FD || EncodingProperty) {
+ const ObjCInterfaceType *OIT =
+ PointeeTy.getUnqualifiedType()->getAsObjCInterfaceType();
+ ObjCInterfaceDecl *OI = OIT->getDecl();
+ S += '"';
+ S += OI->getNameAsCString();
+ for (ObjCInterfaceType::qual_iterator I = OIT->qual_begin(),
+ E = OIT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ } else if (isObjCClassStructType(PointeeTy)) {
+ S += '#';
+ return;
+ } else if (isObjCSelType(PointeeTy)) {
+ S += ':';
+ return;
+ }
+
+ if (PointeeTy->isCharType()) {
+ // char pointer types should be encoded as '*' unless the pointee is a
+ // type that has been typedef'd to 'BOOL'.
+ if (!isTypeTypedefedAsBOOL(PointeeTy)) {
+ S += '*';
+ return;
+ }
+ }
+
+ S += '^';
+ getLegacyIntegralTypeEncoding(PointeeTy);
+
+ getObjCEncodingForTypeImpl(PointeeTy, S,
+ false, ExpandPointedToStructures,
+ NULL);
+ } else if (const ArrayType *AT =
+ // Ignore type qualifiers etc.
+ dyn_cast<ArrayType>(T->getCanonicalTypeInternal())) {
+ if (isa<IncompleteArrayType>(AT)) {
+ // Incomplete arrays are encoded as a pointer to the array element.
+ S += '^';
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ } else {
+ S += '[';
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ S += llvm::utostr(CAT->getSize().getZExtValue());
+ else {
+ // Variable-length arrays are encoded as regular arrays of 0 elements.
+ assert(isa<VariableArrayType>(AT) && "Unknown array type!");
+ S += '0';
+ }
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ S += ']';
+ }
+ } else if (T->getAsFunctionType()) {
+ S += '?';
+ } else if (const RecordType *RTy = T->getAsRecordType()) {
+ RecordDecl *RDecl = RTy->getDecl();
+ S += RDecl->isUnion() ? '(' : '{';
+ // Anonymous structures print as '?'
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) {
+ S += II->getName();
+ } else {
+ S += '?';
+ }
+ if (ExpandStructures) {
+ S += '=';
+ for (RecordDecl::field_iterator Field = RDecl->field_begin(*this),
+ FieldEnd = RDecl->field_end(*this);
+ Field != FieldEnd; ++Field) {
+ if (FD) {
+ S += '"';
+ S += Field->getNameAsString();
+ S += '"';
+ }
+
+ // Special case bit-fields.
+ if (Field->isBitField()) {
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
+ (*Field));
+ } else {
+ QualType qt = Field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true,
+ FD);
+ }
+ }
+ }
+ S += RDecl->isUnion() ? ')' : '}';
+ } else if (T->isEnumeralType()) {
+ if (FD && FD->isBitField())
+ EncodeBitField(this, S, FD);
+ else
+ S += 'i';
+ } else if (T->isBlockPointerType()) {
+ S += "@?"; // Unlike a pointer-to-function, which is "^?".
+ } else if (T->isObjCInterfaceType()) {
+ // @encode(class_name)
+ ObjCInterfaceDecl *OI = T->getAsObjCInterfaceType()->getDecl();
+ S += '{';
+ const IdentifierInfo *II = OI->getIdentifier();
+ S += II->getName();
+ S += '=';
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ CollectObjCIvars(OI, RecFields);
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ if (RecFields[i]->isBitField())
+ getObjCEncodingForTypeImpl(RecFields[i]->getType(), S, false, true,
+ RecFields[i]);
+ else
+ getObjCEncodingForTypeImpl(RecFields[i]->getType(), S, false, true,
+ FD);
+ }
+ S += '}';
+ }
+ else
+ assert(0 && "@encode for type not implemented!");
+}
+
+void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
+ std::string& S) const {
+ if (QT & Decl::OBJC_TQ_In)
+ S += 'n';
+ if (QT & Decl::OBJC_TQ_Inout)
+ S += 'N';
+ if (QT & Decl::OBJC_TQ_Out)
+ S += 'o';
+ if (QT & Decl::OBJC_TQ_Bycopy)
+ S += 'O';
+ if (QT & Decl::OBJC_TQ_Byref)
+ S += 'R';
+ if (QT & Decl::OBJC_TQ_Oneway)
+ S += 'V';
+}
+
+void ASTContext::setBuiltinVaListType(QualType T) {
+ assert(BuiltinVaListType.isNull() && "__builtin_va_list type already set!");
+
+ BuiltinVaListType = T;
+}
+
+void ASTContext::setObjCIdType(QualType T) {
+ ObjCIdType = T;
+
+ const TypedefType *TT = T->getAsTypedefType();
+ if (!TT)
+ return;
+
+ TypedefDecl *TD = TT->getDecl();
+
+ // typedef struct objc_object *id;
+ const PointerType *ptr = TD->getUnderlyingType()->getAsPointerType();
+ // User error - caller will issue diagnostics.
+ if (!ptr)
+ return;
+ const RecordType *rec = ptr->getPointeeType()->getAsStructureType();
+ // User error - caller will issue diagnostics.
+ if (!rec)
+ return;
+ IdStructType = rec;
+}
+
+void ASTContext::setObjCSelType(QualType T) {
+ ObjCSelType = T;
+
+ const TypedefType *TT = T->getAsTypedefType();
+ if (!TT)
+ return;
+ TypedefDecl *TD = TT->getDecl();
+
+ // typedef struct objc_selector *SEL;
+ const PointerType *ptr = TD->getUnderlyingType()->getAsPointerType();
+ if (!ptr)
+ return;
+ const RecordType *rec = ptr->getPointeeType()->getAsStructureType();
+ if (!rec)
+ return;
+ SelStructType = rec;
+}
+
+void ASTContext::setObjCProtoType(QualType QT) {
+ ObjCProtoType = QT;
+}
+
+void ASTContext::setObjCClassType(QualType T) {
+ ObjCClassType = T;
+
+ const TypedefType *TT = T->getAsTypedefType();
+ if (!TT)
+ return;
+ TypedefDecl *TD = TT->getDecl();
+
+ // typedef struct objc_class *Class;
+ const PointerType *ptr = TD->getUnderlyingType()->getAsPointerType();
+ assert(ptr && "'Class' incorrectly typed");
+ const RecordType *rec = ptr->getPointeeType()->getAsStructureType();
+ assert(rec && "'Class' incorrectly typed");
+ ClassStructType = rec;
+}
+
+void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
+ assert(ObjCConstantStringType.isNull() &&
+ "'NSConstantString' type already set!");
+
+ ObjCConstantStringType = getObjCInterfaceType(Decl);
+}
+
+/// \brief Retrieve the template name that represents a qualified
+/// template name such as \c std::vector.
+TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) {
+ llvm::FoldingSetNodeID ID;
+ QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+
+ void *InsertPos = 0;
+ QualifiedTemplateName *QTN =
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ if (!QTN) {
+ QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateNames.InsertNode(QTN, InsertPos);
+ }
+
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template apply.
+TemplateName ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) {
+ assert(NNS->isDependent() && "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Name);
+
+ void *InsertPos = 0;
+ DependentTemplateName *QTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this,4) DependentTemplateName(NNS, Name);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
+ QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon);
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+/// getFromTargetType - Given one of the integer types provided by
+/// TargetInfo, produce the corresponding type. The unsigned @p Type
+/// is actually a value of type @c TargetInfo::IntType.
+QualType ASTContext::getFromTargetType(unsigned Type) const {
+ switch (Type) {
+ case TargetInfo::NoInt: return QualType();
+ case TargetInfo::SignedShort: return ShortTy;
+ case TargetInfo::UnsignedShort: return UnsignedShortTy;
+ case TargetInfo::SignedInt: return IntTy;
+ case TargetInfo::UnsignedInt: return UnsignedIntTy;
+ case TargetInfo::SignedLong: return LongTy;
+ case TargetInfo::UnsignedLong: return UnsignedLongTy;
+ case TargetInfo::SignedLongLong: return LongLongTy;
+ case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
+ }
+
+ assert(false && "Unhandled TargetInfo::IntType value");
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Type Predicates.
+//===----------------------------------------------------------------------===//
+
+/// isObjCNSObjectType - Return true if this is an NSObject object, i.e. a
+/// C-style pointer type declared with the NSObject attribute.
+/// FIXME - Make it work directly on types.
+///
+bool ASTContext::isObjCNSObjectType(QualType Ty) const {
+ if (TypedefType *TDT = dyn_cast<TypedefType>(Ty)) {
+ if (TypedefDecl *TD = TDT->getDecl())
+ if (TD->getAttr<ObjCNSObjectAttr>())
+ return true;
+ }
+ return false;
+}
+
+/// isObjCObjectPointerType - Returns true if type is an Objective-C pointer
+/// to an object type. This includes "id" and "Class" (two 'special' pointers
+/// to struct), Interface* (pointer to ObjCInterfaceType) and id<P> (qualified
+/// ID type).
+bool ASTContext::isObjCObjectPointerType(QualType Ty) const {
+ if (Ty->isObjCQualifiedIdType())
+ return true;
+
+ // Blocks are objects.
+ if (Ty->isBlockPointerType())
+ return true;
+
+ // All other object types are pointers.
+ const PointerType *PT = Ty->getAsPointerType();
+ if (PT == 0)
+ return false;
+
+ // If this is a pointer to an interface (e.g. NSString*), it is ok.
+ if (PT->getPointeeType()->isObjCInterfaceType() ||
+ // If it has the NSObject attribute, it is OK as well.
+ isObjCNSObjectType(Ty))
+ return true;
+
+ // Check to see if this is 'id' or 'Class', both of which are typedefs for
+ // pointer types. This looks for the typedef specifically, not for the
+ // underlying type. Iteratively strip off typedefs so that we can handle
+ // typedefs of typedefs.
+ while (TypedefType *TDT = dyn_cast<TypedefType>(Ty)) {
+ if (Ty.getUnqualifiedType() == getObjCIdType() ||
+ Ty.getUnqualifiedType() == getObjCClassType())
+ return true;
+
+ Ty = TDT->getDecl()->getUnderlyingType();
+ }
+
+ return false;
+}
+
+/// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, Objective-C's
+/// garbage collection attributes.
+///
+QualType::GCAttrTypes ASTContext::getObjCGCAttrKind(const QualType &Ty) const {
+ QualType::GCAttrTypes GCAttrs = QualType::GCNone;
+ if (getLangOptions().ObjC1 &&
+ getLangOptions().getGCMode() != LangOptions::NonGC) {
+ GCAttrs = Ty.getObjCGCAttr();
+ // The default behavior under Objective-C's GC is for Objective-C pointers
+ // (or pointers to them) to be treated as though they were declared
+ // as __strong.
+ if (GCAttrs == QualType::GCNone) {
+ if (isObjCObjectPointerType(Ty))
+ GCAttrs = QualType::Strong;
+ else if (Ty->isPointerType())
+ return getObjCGCAttrKind(Ty->getAsPointerType()->getPointeeType());
+ }
+ // Non-pointers get no GC attribute regardless of the attribute
+ // set on them.
+ else if (!isObjCObjectPointerType(Ty) && !Ty->isPointerType())
+ return QualType::GCNone;
+ }
+ return GCAttrs;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Compatibility Testing
+//===----------------------------------------------------------------------===//
+
+/// typesAreBlockCompatible - This routine is called when comparing two
+/// block types. Types must be strictly compatible here. For example,
+/// C unfortunately doesn't produce an error for the following:
+///
+/// int (*emptyArgFunc)();
+/// int (*intArgList)(int) = emptyArgFunc;
+///
+/// For blocks, we will produce an error for the following (similar to C++):
+///
+/// int (^emptyArgBlock)();
+/// int (^intArgBlock)(int) = emptyArgBlock;
+///
+/// FIXME: When the dust settles on this integration, fold this into mergeTypes.
+///
+bool ASTContext::typesAreBlockCompatible(QualType lhs, QualType rhs) {
+ const FunctionType *lbase = lhs->getAsFunctionType();
+ const FunctionType *rbase = rhs->getAsFunctionType();
+ const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ if (lproto && rproto == 0)
+ return false;
+ return !mergeTypes(lhs, rhs).isNull();
+}
+
+/// areCompatVectorTypes - Return true if the two specified vector types are
+/// compatible.
+static bool areCompatVectorTypes(const VectorType *LHS,
+ const VectorType *RHS) {
+ assert(LHS->isCanonical() && RHS->isCanonical());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumElements() == RHS->getNumElements();
+}
+
+/// canAssignObjCInterfaces - Return true if the two interface types are
+/// compatible for assignment from RHS to LHS. This handles validation of any
+/// protocol qualifiers on the LHS or RHS.
+///
+bool ASTContext::canAssignObjCInterfaces(const ObjCInterfaceType *LHS,
+ const ObjCInterfaceType *RHS) {
+ // Verify that the base decls are compatible: the RHS must be a subclass of
+ // the LHS.
+ if (!LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
+ return false;
+
+ // RHS must have a superset of the protocols in the LHS. If the LHS is not
+ // protocol qualified at all, then we are good.
+ if (!isa<ObjCQualifiedInterfaceType>(LHS))
+ return true;
+
+ // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't, then it
+ // isn't a superset.
+ if (!isa<ObjCQualifiedInterfaceType>(RHS))
+ return true; // FIXME: should return false!
+
+ // Finally, we must have two protocol-qualified interfaces.
+ const ObjCQualifiedInterfaceType *LHSP =cast<ObjCQualifiedInterfaceType>(LHS);
+ const ObjCQualifiedInterfaceType *RHSP =cast<ObjCQualifiedInterfaceType>(RHS);
+
+ // Every protocol on the LHS must also be present on the RHS.
+ assert(LHSP->qual_begin() != LHSP->qual_end() && "Empty LHS protocol list?");
+
+ for (ObjCQualifiedInterfaceType::qual_iterator LHSPI = LHSP->qual_begin(),
+ LHSPE = LHSP->qual_end();
+ LHSPI != LHSPE; LHSPI++) {
+ bool RHSImplementsProtocol = false;
+
+ // If the RHS doesn't implement the protocol on the left, the types
+ // are incompatible.
+ for (ObjCQualifiedInterfaceType::qual_iterator RHSPI = RHSP->qual_begin(),
+ RHSPE = RHSP->qual_end();
+ !RHSImplementsProtocol && (RHSPI != RHSPE); RHSPI++) {
+ if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier()))
+ RHSImplementsProtocol = true;
+ }
+ // FIXME: For better diagnostics, consider passing back the protocol name.
+ if (!RHSImplementsProtocol)
+ return false;
+ }
+ // The RHS implements all protocols listed on the LHS.
+ return true;
+}
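+
+// For illustration only (hypothetical interfaces): given
+//
+//   @protocol P @end
+//   @interface Base @end
+//   @interface Derived : Base <P> @end
+//
+// canAssignObjCInterfaces(Base<P>, Derived<P>) succeeds, since Derived is a
+// subclass of Base and its protocol list covers P, while
+// canAssignObjCInterfaces(Derived, Base) fails, since Base is not a subclass
+// of Derived.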
+
+bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
+ // get the "pointed to" types
+ const PointerType *LHSPT = LHS->getAsPointerType();
+ const PointerType *RHSPT = RHS->getAsPointerType();
+
+ if (!LHSPT || !RHSPT)
+ return false;
+
+ QualType lhptee = LHSPT->getPointeeType();
+ QualType rhptee = RHSPT->getPointeeType();
+ const ObjCInterfaceType* LHSIface = lhptee->getAsObjCInterfaceType();
+ const ObjCInterfaceType* RHSIface = rhptee->getAsObjCInterfaceType();
+ // ID acts sort of like void* for ObjC interfaces
+ if (LHSIface && isObjCIdStructType(rhptee))
+ return true;
+ if (RHSIface && isObjCIdStructType(lhptee))
+ return true;
+ if (!LHSIface || !RHSIface)
+ return false;
+ return canAssignObjCInterfaces(LHSIface, RHSIface) ||
+ canAssignObjCInterfaces(RHSIface, LHSIface);
+}
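+
+// For illustration only (hypothetical Cocoa types): "NSString *" is
+// comparable with "id" (id acts like void* here) and with
+// "NSMutableString *" (one interface is a subclass of the other), but
+// "NSString *" and "NSArray *" are not comparable, since neither interface
+// can be assigned to the other.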
+
+/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
+/// both shall have the identically qualified version of a compatible type.
+/// C99 6.2.7p1: Two types have compatible types if their types are the
+/// same. See 6.7.[2,3,5] for additional rules.
+bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS) {
+ return !mergeTypes(LHS, RHS).isNull();
+}
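+
+// For example, in C the following pairs are compatible (mergeTypes below
+// yields a non-null merged type): "int[10]" and "int[]", "int (*)(int)" and
+// "int (*)()", and an enum and its underlying integer type. "int" and
+// "long" are not compatible, and neither are "int" and "const int", since
+// their qualifiers differ.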
+
+QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs) {
+ const FunctionType *lbase = lhs->getAsFunctionType();
+ const FunctionType *rbase = rhs->getAsFunctionType();
+ const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ bool allLTypes = true;
+ bool allRTypes = true;
+
+ // Check return type
+ QualType retType = mergeTypes(lbase->getResultType(), rbase->getResultType());
+ if (retType.isNull()) return QualType();
+ if (getCanonicalType(retType) != getCanonicalType(lbase->getResultType()))
+ allLTypes = false;
+ if (getCanonicalType(retType) != getCanonicalType(rbase->getResultType()))
+ allRTypes = false;
+
+ if (lproto && rproto) { // two C99 style function prototypes
+ assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ "C++ shouldn't be here");
+ unsigned lproto_nargs = lproto->getNumArgs();
+ unsigned rproto_nargs = rproto->getNumArgs();
+
+ // Compatible functions must have the same number of arguments
+ if (lproto_nargs != rproto_nargs)
+ return QualType();
+
+ // Variadic and non-variadic functions aren't compatible
+ if (lproto->isVariadic() != rproto->isVariadic())
+ return QualType();
+
+ if (lproto->getTypeQuals() != rproto->getTypeQuals())
+ return QualType();
+
+ // Check argument compatibility
+ llvm::SmallVector<QualType, 10> types;
+ for (unsigned i = 0; i < lproto_nargs; i++) {
+ QualType largtype = lproto->getArgType(i).getUnqualifiedType();
+ QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
+ QualType argtype = mergeTypes(largtype, rargtype);
+ if (argtype.isNull()) return QualType();
+ types.push_back(argtype);
+ if (getCanonicalType(argtype) != getCanonicalType(largtype))
+ allLTypes = false;
+ if (getCanonicalType(argtype) != getCanonicalType(rargtype))
+ allRTypes = false;
+ }
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionType(retType, types.begin(), types.size(),
+ lproto->isVariadic(), lproto->getTypeQuals());
+ }
+
+ if (lproto) allRTypes = false;
+ if (rproto) allLTypes = false;
+
+ const FunctionProtoType *proto = lproto ? lproto : rproto;
+ if (proto) {
+ assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ if (proto->isVariadic()) return QualType();
+ // Check that the types are compatible with the types that
+ // would result from default argument promotions (C99 6.7.5.3p15).
+ // The only types actually affected are promotable integer
+ // types and floats, which would be passed as a different
+ // type depending on whether the prototype is visible.
+ unsigned proto_nargs = proto->getNumArgs();
+ for (unsigned i = 0; i < proto_nargs; ++i) {
+ QualType argTy = proto->getArgType(i);
+ if (argTy->isPromotableIntegerType() ||
+ getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
+ return QualType();
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionType(retType, proto->arg_type_begin(),
+ proto->getNumArgs(), proto->isVariadic(),
+ proto->getTypeQuals());
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionNoProtoType(retType);
+}
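+
+// For example, merging "int f(int)" with an unprototyped "int f()" yields
+// "int f(int)", the prototyped form. "int g(char)" does not merge with
+// "int g()" because 'char' is a promotable integer type, and "int h(float)"
+// does not merge with "int h()" because 'float' promotes to 'double'; in
+// both cases the argument would be passed differently depending on whether
+// the prototype is visible (C99 6.7.5.3p15).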
+
+QualType ASTContext::mergeTypes(QualType LHS, QualType RHS) {
+ // C++ [expr]: If an expression initially has the type "reference to T", the
+ // type is adjusted to "T" prior to any further analysis, the expression
+ // designates the object or function denoted by the reference, and the
+ // expression is an lvalue unless the reference is an rvalue reference and
+ // the expression is a function call (possibly inside parentheses).
+ // FIXME: C++ shouldn't be going through here! The rules are different
+ // enough that they should be handled separately.
+ // FIXME: Merging of lvalue and rvalue references is incorrect. C++ *really*
+ // shouldn't be going through here!
+ if (const ReferenceType *RT = LHS->getAsReferenceType())
+ LHS = RT->getPointeeType();
+ if (const ReferenceType *RT = RHS->getAsReferenceType())
+ RHS = RT->getPointeeType();
+
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+
+ // If the qualifiers are different, the types aren't compatible
+ // Note that we handle extended qualifiers later, in the
+ // case for ExtQualType.
+ if (LHSCan.getCVRQualifiers() != RHSCan.getCVRQualifiers())
+ return QualType();
+
+ Type::TypeClass LHSClass = LHSCan->getTypeClass();
+ Type::TypeClass RHSClass = RHSCan->getTypeClass();
+
+ // We want to consider the two function types to be the same for these
+ // comparisons, just force one to the other.
+ if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
+ if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
+
+ // Strip objc_gc attributes off the top level so they can be merged.
+ // This is a complete mess, but the attribute itself doesn't make much sense.
+ if (RHSClass == Type::ExtQual) {
+ QualType::GCAttrTypes GCAttr = RHSCan.getObjCGCAttr();
+ if (GCAttr != QualType::GCNone) {
+ RHS = QualType(cast<ExtQualType>(RHS.getDesugaredType())->getBaseType(),
+ RHS.getCVRQualifiers());
+ QualType Result = mergeTypes(LHS, RHS);
+ if (Result.getObjCGCAttr() == QualType::GCNone)
+ Result = getObjCGCQualType(Result, GCAttr);
+ else if (Result.getObjCGCAttr() != GCAttr)
+ Result = QualType();
+ return Result;
+ }
+ }
+ if (LHSClass == Type::ExtQual) {
+ QualType::GCAttrTypes GCAttr = LHSCan.getObjCGCAttr();
+ if (GCAttr != QualType::GCNone) {
+ LHS = QualType(cast<ExtQualType>(LHS.getDesugaredType())->getBaseType(),
+ LHS.getCVRQualifiers());
+ QualType Result = mergeTypes(LHS, RHS);
+ if (Result.getObjCGCAttr() == QualType::GCNone)
+ Result = getObjCGCQualType(Result, GCAttr);
+ else if (Result.getObjCGCAttr() != GCAttr)
+ Result = QualType();
+ return Result;
+ }
+ }
+
+ // Same as above for arrays
+ if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
+ LHSClass = Type::ConstantArray;
+ if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
+ RHSClass = Type::ConstantArray;
+
+ // Canonicalize ExtVector -> Vector.
+ if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
+ if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
+
+ // Consider qualified interfaces and interfaces the same.
+ if (LHSClass == Type::ObjCQualifiedInterface) LHSClass = Type::ObjCInterface;
+ if (RHSClass == Type::ObjCQualifiedInterface) RHSClass = Type::ObjCInterface;
+
+ // If the canonical type classes don't match.
+ if (LHSClass != RHSClass) {
+ const ObjCInterfaceType* LHSIface = LHS->getAsObjCInterfaceType();
+ const ObjCInterfaceType* RHSIface = RHS->getAsObjCInterfaceType();
+
+ // 'id' and 'Class' act sort of like void* for ObjC interfaces
+ if (LHSIface && (isObjCIdStructType(RHS) || isObjCClassStructType(RHS)))
+ return LHS;
+ if (RHSIface && (isObjCIdStructType(LHS) || isObjCClassStructType(LHS)))
+ return RHS;
+
+ // ID is compatible with all qualified id types.
+ if (LHS->isObjCQualifiedIdType()) {
+ if (const PointerType *PT = RHS->getAsPointerType()) {
+ QualType pType = PT->getPointeeType();
+ if (isObjCIdStructType(pType) || isObjCClassStructType(pType))
+ return LHS;
+ // FIXME: need to use ObjCQualifiedIdTypesAreCompatible(LHS, RHS, true).
+ // Unfortunately, this API is part of Sema (which we don't have access
+ // to), so this needs refactoring. The following check is insufficient,
+ // since we also need to make sure the class implements the protocol.
+ if (pType->isObjCInterfaceType())
+ return LHS;
+ }
+ }
+ if (RHS->isObjCQualifiedIdType()) {
+ if (const PointerType *PT = LHS->getAsPointerType()) {
+ QualType pType = PT->getPointeeType();
+ if (isObjCIdStructType(pType) || isObjCClassStructType(pType))
+ return RHS;
+ // FIXME: need to use ObjCQualifiedIdTypesAreCompatible(LHS, RHS, true).
+ // Unfortunately, this API is part of Sema (which we don't have access
+ // to), so this needs refactoring. The following check is insufficient,
+ // since we also need to make sure the class implements the protocol.
+ if (pType->isObjCInterfaceType())
+ return RHS;
+ }
+ }
+ // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
+ // a signed integer type, or an unsigned integer type.
+ if (const EnumType* ETy = LHS->getAsEnumType()) {
+ if (ETy->getDecl()->getIntegerType() == RHSCan.getUnqualifiedType())
+ return RHS;
+ }
+ if (const EnumType* ETy = RHS->getAsEnumType()) {
+ if (ETy->getDecl()->getIntegerType() == LHSCan.getUnqualifiedType())
+ return LHS;
+ }
+
+ return QualType();
+ }
+
+ // The canonical type classes match.
+ switch (LHSClass) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical and dependent types shouldn't get here");
+ return QualType();
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ assert(false && "C++ should never be in mergeTypes");
+ return QualType();
+
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::ExtVector:
+ case Type::ObjCQualifiedInterface:
+ assert(false && "Types are eliminated above");
+ return QualType();
+
+ case Type::Pointer:
+ {
+ // Merge two pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAsPointerType()->getPointeeType();
+ QualType RHSPointee = RHS->getAsPointerType()->getPointeeType();
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getPointerType(ResultType);
+ }
+ case Type::BlockPointer:
+ {
+ // Merge two block pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAsBlockPointerType()->getPointeeType();
+ QualType RHSPointee = RHS->getAsBlockPointerType()->getPointeeType();
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getBlockPointerType(ResultType);
+ }
+ case Type::ConstantArray:
+ {
+ const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
+ const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
+ if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ return QualType();
+
+ QualType LHSElem = getAsArrayType(LHS)->getElementType();
+ QualType RHSElem = getAsArrayType(RHS)->getElementType();
+ QualType ResultType = mergeTypes(LHSElem, RHSElem);
+ if (ResultType.isNull()) return QualType();
+ if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
+ const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
+ if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of LHS, but the type
+ // has to be different.
+ return LHS;
+ }
+ if (RVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of RHS, but the type
+ // has to be different.
+ return RHS;
+ }
+ if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
+ if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
+ return getIncompleteArrayType(ResultType, ArrayType::ArraySizeModifier(),0);
+ }
+ case Type::FunctionNoProto:
+ return mergeFunctionTypes(LHS, RHS);
+ case Type::Record:
+ case Type::Enum:
+ // FIXME: Why are these compatible?
+ if (isObjCIdStructType(LHS) && isObjCClassStructType(RHS)) return LHS;
+ if (isObjCClassStructType(LHS) && isObjCIdStructType(RHS)) return LHS;
+ return QualType();
+ case Type::Builtin:
+ // Only exactly equal builtin types are compatible, which is tested above.
+ return QualType();
+ case Type::Complex:
+ // Distinct complex types are incompatible.
+ return QualType();
+ case Type::Vector:
+ // FIXME: The merged type should be an ExtVector!
+ if (areCompatVectorTypes(LHS->getAsVectorType(), RHS->getAsVectorType()))
+ return LHS;
+ return QualType();
+ case Type::ObjCInterface: {
+ // Check if the interfaces are assignment compatible.
+ // FIXME: This should be type compatibility, e.g. whether
+ // "LHS x; RHS x;" at global scope is legal.
+ const ObjCInterfaceType* LHSIface = LHS->getAsObjCInterfaceType();
+ const ObjCInterfaceType* RHSIface = RHS->getAsObjCInterfaceType();
+ if (LHSIface && RHSIface &&
+ canAssignObjCInterfaces(LHSIface, RHSIface))
+ return LHS;
+
+ return QualType();
+ }
+ case Type::ObjCQualifiedId:
+ // Distinct qualified id's are not compatible.
+ return QualType();
+ case Type::FixedWidthInt:
+ // Distinct fixed-width integers are not compatible.
+ return QualType();
+ case Type::ExtQual:
+ // FIXME: ExtQual types can be compatible even if they're not
+ // identical!
+ return QualType();
+ // First attempt at an implementation, but I'm not really sure it's
+ // right...
+#if 0
+ ExtQualType* LQual = cast<ExtQualType>(LHSCan);
+ ExtQualType* RQual = cast<ExtQualType>(RHSCan);
+ if (LQual->getAddressSpace() != RQual->getAddressSpace() ||
+ LQual->getObjCGCAttr() != RQual->getObjCGCAttr())
+ return QualType();
+ QualType LHSBase, RHSBase, ResultType, ResCanUnqual;
+ LHSBase = QualType(LQual->getBaseType(), 0);
+ RHSBase = QualType(RQual->getBaseType(), 0);
+ ResultType = mergeTypes(LHSBase, RHSBase);
+ if (ResultType.isNull()) return QualType();
+ ResCanUnqual = getCanonicalType(ResultType).getUnqualifiedType();
+ if (LHSCan.getUnqualifiedType() == ResCanUnqual)
+ return LHS;
+ if (RHSCan.getUnqualifiedType() == ResCanUnqual)
+ return RHS;
+ ResultType = getAddrSpaceQualType(ResultType, LQual->getAddressSpace());
+ ResultType = getObjCGCQualType(ResultType, LQual->getObjCGCAttr());
+ ResultType.setCVRQualifiers(LHSCan.getCVRQualifiers());
+ return ResultType;
+#endif
+
+ case Type::TemplateSpecialization:
+ assert(false && "Dependent types have no size");
+ break;
+ }
+
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Predicates
+//===----------------------------------------------------------------------===//
+
+unsigned ASTContext::getIntWidth(QualType T) {
+ if (T == BoolTy)
+ return 1;
+ if (FixedWidthIntType* FWIT = dyn_cast<FixedWidthIntType>(T)) {
+ return FWIT->getWidth();
+ }
+ // For builtin types, just use the standard type sizing method
+ return (unsigned)getTypeSize(T);
+}
+
+QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
+ assert(T->isSignedIntegerType() && "Unexpected type");
+ if (const EnumType* ETy = T->getAsEnumType())
+ T = ETy->getDecl()->getIntegerType();
+ const BuiltinType* BTy = T->getAsBuiltinType();
+ assert (BTy && "Unexpected signed integer type");
+ switch (BTy->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return UnsignedCharTy;
+ case BuiltinType::Short:
+ return UnsignedShortTy;
+ case BuiltinType::Int:
+ return UnsignedIntTy;
+ case BuiltinType::Long:
+ return UnsignedLongTy;
+ case BuiltinType::LongLong:
+ return UnsignedLongLongTy;
+ case BuiltinType::Int128:
+ return UnsignedInt128Ty;
+ default:
+ assert(0 && "Unexpected signed integer type");
+ return QualType();
+ }
+}
+
+ExternalASTSource::~ExternalASTSource() { }
+
+void ExternalASTSource::PrintStats() { }
diff --git a/lib/AST/Builtins.cpp b/lib/AST/Builtins.cpp
new file mode 100644
index 0000000..8368feb
--- /dev/null
+++ b/lib/AST/Builtins.cpp
@@ -0,0 +1,290 @@
+//===--- Builtins.cpp - Builtin function implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various things for builtin functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Builtins.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/TargetInfo.h"
+using namespace clang;
+
+static const Builtin::Info BuiltinInfo[] = {
+ { "not a builtin function", 0, 0, 0, false },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#include "clang/AST/Builtins.def"
+};
+
+const Builtin::Info &Builtin::Context::GetRecord(unsigned ID) const {
+ if (ID < Builtin::FirstTSBuiltin)
+ return BuiltinInfo[ID];
+ assert(ID - Builtin::FirstTSBuiltin < NumTSRecords && "Invalid builtin ID!");
+ return TSRecords[ID - Builtin::FirstTSBuiltin];
+}
+
+/// \brief Load all of the target builtins. This must be called
+/// prior to initializing the builtin identifiers.
+void Builtin::Context::InitializeTargetBuiltins(const TargetInfo &Target) {
+ Target.getTargetBuiltins(TSRecords, NumTSRecords);
+}
+
+/// InitializeBuiltins - Mark the identifiers for all the builtins with their
+/// appropriate builtin ID # and mark any non-portable builtin identifiers as
+/// such.
+void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
+ bool NoBuiltins) {
+ // Step #1: mark all target-independent builtins with their ID's.
+ for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
+ if (!BuiltinInfo[i].Suppressed &&
+ (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f')))
+ Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
+
+ // Step #2: Register target-specific builtins.
+ for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
+ if (!TSRecords[i].Suppressed &&
+ (!NoBuiltins ||
+ (TSRecords[i].Attributes &&
+ !strchr(TSRecords[i].Attributes, 'f'))))
+ Table.get(TSRecords[i].Name).setBuiltinID(i+Builtin::FirstTSBuiltin);
+}
+
+void
+Builtin::Context::GetBuiltinNames(llvm::SmallVectorImpl<const char *> &Names,
+ bool NoBuiltins) {
+ // Find all target-independent names.
+ for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
+ if (!BuiltinInfo[i].Suppressed &&
+ (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f')))
+ Names.push_back(BuiltinInfo[i].Name);
+
+ // Find target-specific names.
+ for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
+ if (!TSRecords[i].Suppressed &&
+ (!NoBuiltins ||
+ (TSRecords[i].Attributes &&
+ !strchr(TSRecords[i].Attributes, 'f'))))
+ Names.push_back(TSRecords[i].Name);
+}
+
+bool
+Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg) {
+ const char *Printf = strpbrk(GetRecord(ID).Attributes, "pP");
+ if (!Printf)
+ return false;
+
+ HasVAListArg = (*Printf == 'P');
+
+ ++Printf;
+ assert(*Printf == ':' && "p or P specifier must be followed by a ':'");
+ ++Printf;
+
+ assert(strchr(Printf, ':') && "printf specifier must end with a ':'");
+ FormatIdx = strtol(Printf, 0, 10);
+ return true;
+}
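+
+// For illustration, with the attribute encoding parsed above, an attributes
+// string such as "fp:0:" would mark a printf-like builtin whose format
+// string is argument 0 (HasVAListArg = false), while "fP:0:" would
+// additionally indicate a vprintf-style builtin that takes a va_list
+// (HasVAListArg = true).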
+
+/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
+/// pointer over the consumed characters. This returns the resultant type.
+static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
+ Builtin::Context::GetBuiltinTypeError &Error,
+ bool AllowTypeModifiers = true) {
+ // Modifiers.
+ int HowLong = 0;
+ bool Signed = false, Unsigned = false;
+
+ // Read the modifiers first.
+ bool Done = false;
+ while (!Done) {
+ switch (*Str++) {
+ default: Done = true; --Str; break;
+ case 'S':
+ assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Signed && "Can't use 'S' modifier multiple times!");
+ Signed = true;
+ break;
+ case 'U':
+ assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Unsigned && "Can't use 'S' modifier multiple times!");
+ Unsigned = true;
+ break;
+ case 'L':
+ assert(HowLong <= 2 && "Can't have LLLL modifier");
+ ++HowLong;
+ break;
+ }
+ }
+
+ QualType Type;
+
+ // Read the base type.
+ switch (*Str++) {
+ default: assert(0 && "Unknown builtin type letter!");
+ case 'v':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'v'!");
+ Type = Context.VoidTy;
+ break;
+ case 'f':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'f'!");
+ Type = Context.FloatTy;
+ break;
+ case 'd':
+ assert(HowLong < 2 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'd'!");
+ if (HowLong)
+ Type = Context.LongDoubleTy;
+ else
+ Type = Context.DoubleTy;
+ break;
+ case 's':
+ assert(HowLong == 0 && "Bad modifiers used with 's'!");
+ if (Unsigned)
+ Type = Context.UnsignedShortTy;
+ else
+ Type = Context.ShortTy;
+ break;
+ case 'i':
+ if (HowLong == 3)
+ Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
+ else if (HowLong == 2)
+ Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ else if (HowLong == 1)
+ Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
+ break;
+ case 'c':
+ assert(HowLong == 0 && "Bad modifiers used with 'c'!");
+ if (Signed)
+ Type = Context.SignedCharTy;
+ else if (Unsigned)
+ Type = Context.UnsignedCharTy;
+ else
+ Type = Context.CharTy;
+ break;
+ case 'b': // boolean
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
+ Type = Context.BoolTy;
+ break;
+ case 'z': // size_t.
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
+ Type = Context.getSizeType();
+ break;
+ case 'F':
+ Type = Context.getCFConstantStringType();
+ break;
+ case 'a':
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ break;
+ case 'A':
+ // This is a "reference" to a va_list; however, what exactly
+ // this means depends on how va_list is defined. There are two
+ // different kinds of va_list: ones passed by value, and ones
+ // passed by reference. An example of a by-value va_list is
+ // x86, where va_list is a char*. An example of by-ref va_list
+ // is x86-64, where va_list is a __va_list_tag[1]. For x86,
+ // we want this argument to be a char*&; for x86-64, we want
+ // it to be a __va_list_tag*.
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ if (Type->isArrayType()) {
+ Type = Context.getArrayDecayedType(Type);
+ } else {
+ Type = Context.getLValueReferenceType(Type);
+ }
+ break;
+ case 'V': {
+ char *End;
+
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
+ Type = Context.getVectorType(ElementType, NumElements);
+ break;
+ }
+ case 'P': {
+ IdentifierInfo *II = &Context.Idents.get("FILE");
+ DeclContext::lookup_result Lookup
+ = Context.getTranslationUnitDecl()->lookup(Context, II);
+ if (Lookup.first != Lookup.second && isa<TypeDecl>(*Lookup.first)) {
+ Type = Context.getTypeDeclType(cast<TypeDecl>(*Lookup.first));
+ break;
+ }
+ else {
+ Error = Builtin::Context::GE_Missing_FILE;
+ return QualType();
+ }
+ }
+ }
+
+ if (!AllowTypeModifiers)
+ return Type;
+
+ Done = false;
+ while (!Done) {
+ switch (*Str++) {
+ default: Done = true; --Str; break;
+ case '*':
+ Type = Context.getPointerType(Type);
+ break;
+ case '&':
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.getQualifiedType(QualType::Const);
+ break;
+ }
+ }
+
+ return Type;
+}
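+
+// For illustration, a type string such as "v*v*vC*z" would decode to
+// "void *(void *, const void *, size_t)" (the shape of memcpy): 'v' is
+// void, '*' wraps the preceding type in a pointer, 'C' adds const, and 'z'
+// is size_t. A trailing '.' (e.g. "ic*.") marks the builtin as variadic.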
+
+/// GetBuiltinType - Return the type for the specified builtin.
+QualType Builtin::Context::GetBuiltinType(unsigned id, ASTContext &Context,
+ GetBuiltinTypeError &Error) const {
+ const char *TypeStr = GetRecord(id).Type;
+
+ llvm::SmallVector<QualType, 8> ArgTypes;
+
+ Error = GE_None;
+ QualType ResType = DecodeTypeFromStr(TypeStr, Context, Error);
+ if (Error != GE_None)
+ return QualType();
+ while (TypeStr[0] && TypeStr[0] != '.') {
+ QualType Ty = DecodeTypeFromStr(TypeStr, Context, Error);
+ if (Error != GE_None)
+ return QualType();
+
+ // Do array -> pointer decay. The builtin should use the decayed type.
+ if (Ty->isArrayType())
+ Ty = Context.getArrayDecayedType(Ty);
+
+ ArgTypes.push_back(Ty);
+ }
+
+ assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
+ "'.' should only occur at end of builtin type list!");
+
+ // handle untyped/variadic arguments "T c99Style();" or "T cppStyle(...);".
+ if (ArgTypes.size() == 0 && TypeStr[0] == '.')
+ return Context.getFunctionNoProtoType(ResType);
+ return Context.getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(),
+ TypeStr[0] == '.', 0);
+}
diff --git a/lib/AST/CFG.cpp b/lib/AST/CFG.cpp
new file mode 100644
index 0000000..9f2f207
--- /dev/null
+++ b/lib/AST/CFG.cpp
@@ -0,0 +1,1913 @@
+//===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFG and CFGBuilder classes for representing and
+// building Control-Flow Graphs (CFGs) from ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/CFG.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Format.h"
+#include <iomanip>
+#include <algorithm>
+#include <sstream>
+
+using namespace clang;
+
+namespace {
+
+// SaveAndRestore - A utility class that uses RAII to save and restore
+// the value of a variable.
+template<typename T>
+struct VISIBILITY_HIDDEN SaveAndRestore {
+ SaveAndRestore(T& x) : X(x), old_value(x) {}
+ ~SaveAndRestore() { X = old_value; }
+ T get() { return old_value; }
+
+ T& X;
+ T old_value;
+};
+
+static SourceLocation GetEndLoc(Decl* D) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(D))
+ if (Expr* Ex = VD->getInit())
+ return Ex->getSourceRange().getEnd();
+
+ return D->getLocation();
+}
+
+/// CFGBuilder - This class implements CFG construction from an AST.
+/// The builder is stateful: an instance of the builder should only be used
+/// to construct a single CFG.
+///
+/// Example usage:
+///
+/// CFGBuilder builder;
+/// CFG* cfg = builder.buildCFG(stmt1);
+///
+/// CFG construction is done via a recursive walk of an AST.
+/// We actually walk the AST in reverse order so that the successor
+/// of a basic block is constructed prior to its predecessor. This
+/// allows us to nicely capture implicit fall-throughs without extra
+/// basic blocks.
+///
+class VISIBILITY_HIDDEN CFGBuilder : public StmtVisitor<CFGBuilder,CFGBlock*> {
+ CFG* cfg;
+ CFGBlock* Block;
+ CFGBlock* Succ;
+ CFGBlock* ContinueTargetBlock;
+ CFGBlock* BreakTargetBlock;
+ CFGBlock* SwitchTerminatedBlock;
+ CFGBlock* DefaultCaseBlock;
+
+ // LabelMap records the mapping from Label expressions to their blocks.
+ typedef llvm::DenseMap<LabelStmt*,CFGBlock*> LabelMapTy;
+ LabelMapTy LabelMap;
+
+ // A list of blocks that end with a "goto" that must be backpatched to
+ // their resolved targets upon completion of CFG construction.
+ typedef std::vector<CFGBlock*> BackpatchBlocksTy;
+ BackpatchBlocksTy BackpatchBlocks;
+
+ // A list of labels whose address has been taken (for indirect gotos).
+ typedef llvm::SmallPtrSet<LabelStmt*,5> LabelSetTy;
+ LabelSetTy AddressTakenLabels;
+
+public:
+ explicit CFGBuilder() : cfg(NULL), Block(NULL), Succ(NULL),
+ ContinueTargetBlock(NULL), BreakTargetBlock(NULL),
+ SwitchTerminatedBlock(NULL), DefaultCaseBlock(NULL) {
+ // Create an empty CFG.
+ cfg = new CFG();
+ }
+
+ ~CFGBuilder() { delete cfg; }
+
+ // buildCFG - Used by external clients to construct the CFG.
+ CFG* buildCFG(Stmt* Statement);
+
+ // Visitors to walk an AST and construct the CFG. Called by
+ // buildCFG. Do not call directly!
+
+ CFGBlock* VisitBreakStmt(BreakStmt* B);
+ CFGBlock* VisitCaseStmt(CaseStmt* Terminator);
+ CFGBlock* VisitCompoundStmt(CompoundStmt* C);
+ CFGBlock* VisitContinueStmt(ContinueStmt* C);
+ CFGBlock* VisitDefaultStmt(DefaultStmt* D);
+ CFGBlock* VisitDoStmt(DoStmt* D);
+ CFGBlock* VisitForStmt(ForStmt* F);
+ CFGBlock* VisitGotoStmt(GotoStmt* G);
+ CFGBlock* VisitIfStmt(IfStmt* I);
+ CFGBlock* VisitIndirectGotoStmt(IndirectGotoStmt* I);
+ CFGBlock* VisitLabelStmt(LabelStmt* L);
+ CFGBlock* VisitNullStmt(NullStmt* Statement);
+ CFGBlock* VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
+ CFGBlock* VisitReturnStmt(ReturnStmt* R);
+ CFGBlock* VisitStmt(Stmt* Statement);
+ CFGBlock* VisitSwitchStmt(SwitchStmt* Terminator);
+ CFGBlock* VisitWhileStmt(WhileStmt* W);
+
+ // FIXME: Add support for ObjC-specific control-flow structures.
+
+ // NYS == Not Yet Supported
+ CFGBlock* NYS() {
+ badCFG = true;
+ return Block;
+ }
+
+ CFGBlock* VisitObjCAtTryStmt(ObjCAtTryStmt* S);
+ CFGBlock* VisitObjCAtCatchStmt(ObjCAtCatchStmt* S) {
+ // FIXME: For now we pretend that @catch and the code it contains
+ // do not exist.
+ return Block;
+ }
+
+ // FIXME: This is not completely supported. We basically treat @throw like
+ // a 'return'.
+ CFGBlock* VisitObjCAtThrowStmt(ObjCAtThrowStmt* S);
+
+ CFGBlock* VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S);
+
+ // Blocks.
+ CFGBlock* VisitBlockExpr(BlockExpr* E) { return NYS(); }
+ CFGBlock* VisitBlockDeclRefExpr(BlockDeclRefExpr* E) { return NYS(); }
+
+private:
+ CFGBlock* createBlock(bool add_successor = true);
+ CFGBlock* addStmt(Stmt* Terminator);
+ CFGBlock* WalkAST(Stmt* Terminator, bool AlwaysAddStmt);
+ CFGBlock* WalkAST_VisitChildren(Stmt* Terminator);
+ CFGBlock* WalkAST_VisitDeclSubExpr(Decl* D);
+ CFGBlock* WalkAST_VisitStmtExpr(StmtExpr* Terminator);
+ bool FinishBlock(CFGBlock* B);
+
+ bool badCFG;
+};
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static VariableArrayType* FindVA(Type* t) {
+ while (ArrayType* vt = dyn_cast<ArrayType>(t)) {
+ if (VariableArrayType* vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return 0;
+}
+
+/// buildCFG - Constructs a CFG from an AST (a Stmt*). The AST can
+/// represent an arbitrary statement. Examples include a single expression
+/// or a function body (compound statement). The ownership of the returned
+/// CFG is transferred to the caller. If CFG construction fails, this method
+/// returns NULL.
+CFG* CFGBuilder::buildCFG(Stmt* Statement) {
+ assert (cfg);
+ if (!Statement) return NULL;
+
+ badCFG = false;
+
+ // Create an empty block that will serve as the exit block for the CFG.
+ // Since this is the first block added to the CFG, it will be implicitly
+ // registered as the exit block.
+ Succ = createBlock();
+ assert (Succ == &cfg->getExit());
+ Block = NULL; // the EXIT block is empty. Create all other blocks lazily.
+
+ // Visit the statements and create the CFG.
+ CFGBlock* B = Visit(Statement);
+ if (!B) B = Succ;
+
+ if (B) {
+ // Finalize the last constructed block. This usually involves
+ // reversing the order of the statements in the block.
+ if (Block) FinishBlock(B);
+
+ // Backpatch the gotos whose label -> block mappings we didn't know
+ // when we encountered them.
+ for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
+ E = BackpatchBlocks.end(); I != E; ++I ) {
+
+ CFGBlock* B = *I;
+ GotoStmt* G = cast<GotoStmt>(B->getTerminator());
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
+
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ B->addSuccessor(LI->second);
+ }
+
+ // Add successors to the Indirect Goto Dispatch block (if we have one).
+ if (CFGBlock* B = cfg->getIndirectGotoBlock())
+ for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
+ E = AddressTakenLabels.end(); I != E; ++I ) {
+
+ // Lookup the target block.
+ LabelMapTy::iterator LI = LabelMap.find(*I);
+
+ // If there is no target block that contains label, then we are looking
+ // at an incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ B->addSuccessor(LI->second);
+ }
+
+ Succ = B;
+ }
+
+ // Create an empty entry block that has no predecessors.
+ cfg->setEntry(createBlock());
+
+ if (badCFG) {
+ delete cfg;
+ cfg = NULL;
+ return NULL;
+ }
+
+ // NULL out cfg so that repeated calls to the builder will fail and that
+ // the ownership of the constructed CFG is passed to the caller.
+ CFG* t = cfg;
+ cfg = NULL;
+ return t;
+}
+
+/// createBlock - Used to lazily create blocks that are connected
+/// to the current (global) successor.
+CFGBlock* CFGBuilder::createBlock(bool add_successor) {
+ CFGBlock* B = cfg->createBlock();
+ if (add_successor && Succ) B->addSuccessor(Succ);
+ return B;
+}
+
+/// FinishBlock - When the last statement has been added to the block,
+/// we must reverse the statements because they have been inserted
+/// in reverse order.
+bool CFGBuilder::FinishBlock(CFGBlock* B) {
+ if (badCFG)
+ return false;
+
+ assert (B);
+ B->reverseStmts();
+ return true;
+}
+
+/// addStmt - Used to add statements/expressions to the current CFGBlock
+/// "Block". This method calls WalkAST on the passed statement to see if it
+/// contains any short-circuit expressions. If so, it recursively creates
+/// the necessary blocks for such expressions. It returns the "topmost" block
+/// of the created blocks, or the original value of "Block" when this method
+/// was called if no additional blocks are created.
+CFGBlock* CFGBuilder::addStmt(Stmt* Terminator) {
+ if (!Block) Block = createBlock();
+ return WalkAST(Terminator,true);
+}
+
+/// WalkAST - Used by addStmt to walk the subtree of a statement and
+/// add extra blocks for ternary operators, &&, and ||. We also
+/// process "," and DeclStmts (which may contain nested control-flow).
+CFGBlock* CFGBuilder::WalkAST(Stmt* Terminator, bool AlwaysAddStmt = false) {
+ switch (Terminator->getStmtClass()) {
+ case Stmt::ConditionalOperatorClass: {
+ ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
+
+ // Create the confluence block that will "merge" the results
+ // of the ternary expression.
+ CFGBlock* ConfluenceBlock = (Block) ? Block : createBlock();
+ ConfluenceBlock->appendStmt(C);
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ // Create a block for the LHS expression if there is an LHS expression.
+ // A GCC extension allows LHS to be NULL, causing the condition to
+ // be the value that is returned instead.
+ // e.g., x ?: y is shorthand for x ? x : y;
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* LHSBlock = NULL;
+ if (C->getLHS()) {
+ LHSBlock = Visit(C->getLHS());
+ if (!FinishBlock(LHSBlock))
+ return 0;
+ Block = NULL;
+ }
+
+ // Create the block for the RHS expression.
+ Succ = ConfluenceBlock;
+ CFGBlock* RHSBlock = Visit(C->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ // Create the block that will contain the condition.
+ Block = createBlock(false);
+
+ if (LHSBlock)
+ Block->addSuccessor(LHSBlock);
+ else {
+ // If we have no LHS expression, add the ConfluenceBlock as a direct
+ // successor for the block containing the condition. Moreover,
+ // we need to reverse the order of the predecessors in the
+ // ConfluenceBlock because the RHSBlock will have been added to
+ // the successors already, and we want the first predecessor to be
+ // the block containing the expression for the case when the ternary
+ // expression evaluates to true.
+ Block->addSuccessor(ConfluenceBlock);
+ assert (ConfluenceBlock->pred_size() == 2);
+ std::reverse(ConfluenceBlock->pred_begin(),
+ ConfluenceBlock->pred_end());
+ }
+
+ Block->addSuccessor(RHSBlock);
+
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+ }
+
+ case Stmt::ChooseExprClass: {
+ ChooseExpr* C = cast<ChooseExpr>(Terminator);
+
+ CFGBlock* ConfluenceBlock = Block ? Block : createBlock();
+ ConfluenceBlock->appendStmt(C);
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* LHSBlock = Visit(C->getLHS());
+ if (!FinishBlock(LHSBlock))
+ return 0;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* RHSBlock = Visit(C->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ Block = createBlock(false);
+ Block->addSuccessor(LHSBlock);
+ Block->addSuccessor(RHSBlock);
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+ }
+
+ case Stmt::DeclStmtClass: {
+ DeclStmt *DS = cast<DeclStmt>(Terminator);
+ if (DS->isSingleDecl()) {
+ Block->appendStmt(Terminator);
+ return WalkAST_VisitDeclSubExpr(DS->getSingleDecl());
+ }
+
+ CFGBlock* B = 0;
+
+ // FIXME: Add a reverse iterator for DeclStmt to avoid this
+ // extra copy.
+ typedef llvm::SmallVector<Decl*,10> BufTy;
+ BufTy Buf(DS->decl_begin(), DS->decl_end());
+
+ for (BufTy::reverse_iterator I=Buf.rbegin(), E=Buf.rend(); I!=E; ++I) {
+ // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
+ unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
+ ? 8 : llvm::AlignOf<DeclStmt>::Alignment;
+
+ // Allocate the DeclStmt using the BumpPtrAllocator. It will
+ // get automatically freed with the CFG.
+ DeclGroupRef DG(*I);
+ Decl* D = *I;
+ void* Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
+
+ DeclStmt* DS = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+
+ // Append the fake DeclStmt to block.
+ Block->appendStmt(DS);
+ B = WalkAST_VisitDeclSubExpr(D);
+ }
+ return B;
+ }
+
+ case Stmt::AddrLabelExprClass: {
+ AddrLabelExpr* A = cast<AddrLabelExpr>(Terminator);
+ AddressTakenLabels.insert(A->getLabel());
+
+ if (AlwaysAddStmt) Block->appendStmt(Terminator);
+ return Block;
+ }
+
+ case Stmt::StmtExprClass:
+ return WalkAST_VisitStmtExpr(cast<StmtExpr>(Terminator));
+
+ case Stmt::SizeOfAlignOfExprClass: {
+ SizeOfAlignOfExpr* E = cast<SizeOfAlignOfExpr>(Terminator);
+
+ // VLA types have expressions that must be evaluated.
+ if (E->isArgumentType()) {
+ for (VariableArrayType* VA = FindVA(E->getArgumentType().getTypePtr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
+ addStmt(VA->getSizeExpr());
+ }
+ // Expressions in sizeof/alignof are not evaluated and thus have no
+ // control flow.
+ else
+ Block->appendStmt(Terminator);
+
+ return Block;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator* B = cast<BinaryOperator>(Terminator);
+
+ if (B->isLogicalOp()) { // && or ||
+ CFGBlock* ConfluenceBlock = (Block) ? Block : createBlock();
+ ConfluenceBlock->appendStmt(B);
+ if (!FinishBlock(ConfluenceBlock))
+ return 0;
+
+ // create the block evaluating the LHS
+ CFGBlock* LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ // create the block evaluating the RHS
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock* RHSBlock = Visit(B->getRHS());
+ if (!FinishBlock(RHSBlock))
+ return 0;
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BinaryOperator::LOr) {
+ LHSBlock->addSuccessor(ConfluenceBlock);
+ LHSBlock->addSuccessor(RHSBlock);
+ }
+ else {
+ assert (B->getOpcode() == BinaryOperator::LAnd);
+ LHSBlock->addSuccessor(RHSBlock);
+ LHSBlock->addSuccessor(ConfluenceBlock);
+ }
+
+ // Generate the blocks for evaluating the LHS.
+ Block = LHSBlock;
+ return addStmt(B->getLHS());
+ }
+ else if (B->getOpcode() == BinaryOperator::Comma) { // ,
+ Block->appendStmt(B);
+ addStmt(B->getRHS());
+ return addStmt(B->getLHS());
+ }
+
+ break;
+ }
+
+ // Blocks: No support for blocks ... yet
+ case Stmt::BlockExprClass:
+ case Stmt::BlockDeclRefExprClass:
+ return NYS();
+
+ case Stmt::ParenExprClass:
+ return WalkAST(cast<ParenExpr>(Terminator)->getSubExpr(), AlwaysAddStmt);
+
+ default:
+ break;
+ };
+
+ if (AlwaysAddStmt) Block->appendStmt(Terminator);
+ return WalkAST_VisitChildren(Terminator);
+}
+
+/// WalkAST_VisitDeclSubExpr - Utility method to add block-level expressions
+/// for initializers in Decls.
+CFGBlock* CFGBuilder::WalkAST_VisitDeclSubExpr(Decl* D) {
+ VarDecl* VD = dyn_cast<VarDecl>(D);
+
+ if (!VD)
+ return Block;
+
+ Expr* Init = VD->getInit();
+
+ if (Init) {
+ // Optimization: Don't create separate block-level statements for literals.
+ switch (Init->getStmtClass()) {
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::StringLiteralClass:
+ break;
+ default:
+ Block = addStmt(Init);
+ }
+ }
+
+ // If the type of VD is a VLA, then we must process its size expressions.
+ for (VariableArrayType* VA = FindVA(VD->getType().getTypePtr()); VA != 0;
+ VA = FindVA(VA->getElementType().getTypePtr()))
+ Block = addStmt(VA->getSizeExpr());
+
+ return Block;
+}
+
+/// WalkAST_VisitChildren - Utility method to call WalkAST on the
+/// children of a Stmt.
+CFGBlock* CFGBuilder::WalkAST_VisitChildren(Stmt* Terminator) {
+ CFGBlock* B = Block;
+ for (Stmt::child_iterator I = Terminator->child_begin(),
+ E = Terminator->child_end();
+ I != E; ++I)
+ if (*I) B = WalkAST(*I);
+
+ return B;
+}
+
+/// WalkAST_VisitStmtExpr - Utility method to handle (nested) statement
+/// expressions (a GCC extension).
+CFGBlock* CFGBuilder::WalkAST_VisitStmtExpr(StmtExpr* Terminator) {
+ Block->appendStmt(Terminator);
+ return VisitCompoundStmt(Terminator->getSubStmt());
+}
+
+/// VisitStmt - Handle statements with no branching control flow.
+CFGBlock* CFGBuilder::VisitStmt(Stmt* Statement) {
+ // We cannot assume that we are in the middle of a basic block, since
+ // the CFG might only be constructed for this single statement. If
+ // we have no current basic block, just create one lazily.
+ if (!Block) Block = createBlock();
+
+ // Simply add the statement to the current block. We actually
+ // insert statements in reverse order; this order is reversed later
+ // when processing the containing element in the AST.
+ addStmt(Statement);
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitNullStmt(NullStmt* Statement) {
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitCompoundStmt(CompoundStmt* C) {
+
+ CFGBlock* LastBlock = NULL;
+
+ for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
+ I != E; ++I ) {
+ LastBlock = Visit(*I);
+ }
+
+ return LastBlock;
+}
+
+CFGBlock* CFGBuilder::VisitIfStmt(IfStmt* I) {
+ // We may see an if statement in the middle of a basic block, or
+ // it may be the first statement we are processing. In either case,
+ // we create a new basic block. First, we create the blocks for
+ // the then...else statements, and then we create the block containing
+ // the if statement. If we were in the middle of a block, we
+ // stop processing that block and reverse its statements. That block
+ // is then the implicit successor for the "then" and "else" clauses.
+
+ // The block we were processing is now finished. Make it the
+ // successor block.
+ if (Block) {
+ Succ = Block;
+ if (!FinishBlock(Block))
+ return 0;
+ }
+
+ // Process the false branch. NULL out Block so that the recursive
+ // call to Visit will create a new basic block.
+ CFGBlock* ElseBlock = Succ;
+
+ if (Stmt* Else = I->getElse()) {
+ SaveAndRestore<CFGBlock*> sv(Succ);
+
+ // NULL out Block so that the recursive call to Visit will
+ // create a new basic block.
+ Block = NULL;
+ ElseBlock = Visit(Else);
+
+ if (!ElseBlock) // Can occur when the Else body has all NullStmts.
+ ElseBlock = sv.get();
+ else if (Block) {
+ if (!FinishBlock(ElseBlock))
+ return 0;
+ }
+ }
+
+ // Process the true branch. NULL out Block so that the recursive
+ // call to Visit will create a new basic block.
+ CFGBlock* ThenBlock;
+ {
+ Stmt* Then = I->getThen();
+ assert (Then);
+ SaveAndRestore<CFGBlock*> sv(Succ);
+ Block = NULL;
+ ThenBlock = Visit(Then);
+
+ if (!ThenBlock) {
+ // We can reach here if the "then" body has all NullStmts.
+ // Create an empty block so we can distinguish between true and false
+ // branches in path-sensitive analyses.
+ ThenBlock = createBlock(false);
+ ThenBlock->addSuccessor(sv.get());
+ }
+ else if (Block) {
+ if (!FinishBlock(ThenBlock))
+ return 0;
+ }
+ }
+
+ // Now create a new block containing the if statement.
+ Block = createBlock(false);
+
+ // Set the terminator of the new block to the If statement.
+ Block->setTerminator(I);
+
+ // Now add the successors.
+ Block->addSuccessor(ThenBlock);
+ Block->addSuccessor(ElseBlock);
+
+ // Add the condition as the last statement in the new block. This
+ // may create new blocks as the condition may contain control-flow. Any
+ // newly created blocks will be pointed to by "Block".
+ return addStmt(I->getCond()->IgnoreParens());
+}
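+
+// For illustration, "if (c) t(); else e();" is built (roughly) as:
+//
+//   [c] --true--> [t()] --> [code following the if]
+//    \---false--> [e()] --> [code following the if]
+//
+// i.e. one block holding the condition (with the IfStmt as its terminator)
+// whose two successors are the "then" and "else" blocks, both of which flow
+// into whatever block follows the if statement.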
+
+
+CFGBlock* CFGBuilder::VisitReturnStmt(ReturnStmt* R) {
+ // If we were in the middle of a block we stop processing that block
+ // and reverse its statements.
+ //
+ // NOTE: If a "return" appears in the middle of a block, this means
+ // that the code afterwards is DEAD (unreachable). We still
+ // keep a basic block for that code; a simple "mark-and-sweep"
+ // from the entry block will be able to report such dead
+ // blocks.
+ if (Block) FinishBlock(Block);
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ Block->addSuccessor(&cfg->getExit());
+
+ // Add the return statement to the block. This may create new blocks
+ // if R contains control-flow (short-circuit operations).
+ return addStmt(R);
+}
+
+CFGBlock* CFGBuilder::VisitLabelStmt(LabelStmt* L) {
+ // Get the block of the labeled statement. Add it to our map.
+ Visit(L->getSubStmt());
+ CFGBlock* LabelBlock = Block;
+
+ if (!LabelBlock) // This can happen when the body is empty, i.e.
+ LabelBlock=createBlock(); // scopes that only contain NullStmts.
+
+ assert (LabelMap.find(L) == LabelMap.end() && "label already in map");
+ LabelMap[ L ] = LabelBlock;
+
+ // Labels partition blocks, so this is the end of the basic block
+ // we were processing (L is the block's label). Because this is
+ // label (and we have already processed the substatement) there is no
+ // extra control-flow to worry about.
+ LabelBlock->setLabel(L);
+ if (!FinishBlock(LabelBlock))
+ return 0;
+
+ // We set Block to NULL to allow lazy creation of a new block
+ // (if necessary);
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = LabelBlock;
+
+ return LabelBlock;
+}
+
+CFGBlock* CFGBuilder::VisitGotoStmt(GotoStmt* G) {
+ // Goto is a control-flow statement. Thus we stop processing the
+ // current block and create a new one.
+ if (Block) FinishBlock(Block);
+ Block = createBlock(false);
+ Block->setTerminator(G);
+
+ // If we already know the mapping to the label block add the
+ // successor now.
+ LabelMapTy::iterator I = LabelMap.find(G->getLabel());
+
+ if (I == LabelMap.end())
+ // We will need to backpatch this block later.
+ BackpatchBlocks.push_back(Block);
+ else
+ Block->addSuccessor(I->second);
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
+ // "for" is a control-flow statement. Thus we stop processing the
+ // current block.
+
+ CFGBlock* LoopSuccessor = NULL;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ }
+ else LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop
+ // can span multiple basic blocks. Thus we need the "Entry" and "Exit"
+ // blocks that evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(F);
+
+ // Now add the actual condition to the condition block. Because the
+ // condition itself may contain control-flow, new blocks may be created.
+ if (Stmt* C = F->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as
+ // well as any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Now create the loop body.
+ {
+ assert (F->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // Create a new block to contain the (bottom) of the loop body.
+ Block = NULL;
+
+ if (Stmt* I = F->getInc()) {
+ // Generate increment code in its own basic block. This is the target
+ // of continue statements.
+ Succ = Visit(I);
+ }
+ else {
+ // No increment code. Create a special, empty, block that is used as
+ // the target block for "looping back" to the start of the loop.
+ assert(Succ == EntryConditionBlock);
+ Succ = createBlock();
+ }
+
+ // Finish up the increment (or empty) block if it hasn't been already.
+ if (Block) {
+ assert(Block == Succ);
+ if (!FinishBlock(Block))
+ return 0;
+ Block = 0;
+ }
+
+ ContinueTargetBlock = Succ;
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueTargetBlock->setLoopTarget(F);
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // Now populate the body block, and in the process create new blocks
+ // as we walk the body of the loop.
+ CFGBlock* BodyBlock = Visit(F->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "for (...;...; ) ;"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // This new body block is a successor to our "exit" condition block.
+ ExitConditionBlock->addSuccessor(BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ ExitConditionBlock->addSuccessor(LoopSuccessor);
+
+ // If the loop contains initialization, create a new block for those
+ // statements. This block can also contain statements that precede
+ // the loop.
+ if (Stmt* I = F->getInit()) {
+ Block = createBlock();
+ return addStmt(I);
+ }
+ else {
+ // There is no loop initialization, so this is basically a 'while'
+ // loop. NULL out Block to force lazy block construction.
+ Block = NULL;
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+ }
+}
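+
+// For illustration, "for (init; cond; inc) body;" is built (roughly) as:
+//
+//   [init] --> [cond] --true--> [body] --> [inc] --> (back to [cond])
+//                 \----false--> [code following the loop]
+//
+// with the ForStmt as the terminator of the condition block and the
+// increment block serving as the target of any "continue" inside the body.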
+
+CFGBlock* CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
+ // Objective-C fast enumeration 'for' statements:
+ // http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC
+ //
+ // for ( Type newVariable in collection_expression ) { statements }
+ //
+ // becomes:
+ //
+ // prologue:
+ // 1. collection_expression
+ // T. jump to loop_entry
+ // loop_entry:
+ // 1. side-effects of element expression
+ // 2. ObjCForCollectionStmt [performs binding to newVariable]
+ // T. ObjCForCollectionStmt TB, FB [jumps to TB if newVariable != nil]
+ // TB:
+ // statements
+ // T. jump to loop_entry
+ // FB:
+ // what comes after
+ //
+ // and
+ //
+ // Type existingItem;
+ // for ( existingItem in expression ) { statements }
+ //
+ // becomes:
+ //
+ // the same with newVariable replaced with existingItem; the binding
+ // works the same, except that ObjCForCollectionStmt::getElement()
+ // returns a DeclStmt in one case and a DeclRefExpr in the other.
+ //
+
+ CFGBlock* LoopSuccessor = 0;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ Block = 0;
+ }
+ else LoopSuccessor = Succ;
+
+ // Build the condition blocks.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(S);
+
+ // The last statement in the block should be the ObjCForCollectionStmt,
+ // which performs the actual binding to 'element' and determines if there
+ // are any more items in the collection.
+ ExitConditionBlock->appendStmt(S);
+ Block = ExitConditionBlock;
+
+ // Walk the 'element' expression to see if there are any side-effects. We
+ // generate new blocks as necessary. We DON'T add the statement by default
+ // to the CFG unless it contains control-flow.
+ EntryConditionBlock = WalkAST(S->getElement(), false);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ Block = 0;
+ }
+
+ // The condition block is the implicit successor for the loop body as
+ // well as any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Now create the true branch.
+ {
+ // Save the current values for Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Succ(Succ),
+ save_continue(ContinueTargetBlock), save_break(BreakTargetBlock);
+
+ BreakTargetBlock = LoopSuccessor;
+ ContinueTargetBlock = EntryConditionBlock;
+
+ CFGBlock* BodyBlock = Visit(S->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "for (X in Y) ;"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // This new body block is a successor to our "exit" condition block.
+ ExitConditionBlock->addSuccessor(BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ ExitConditionBlock->addSuccessor(LoopSuccessor);
+
+ // Now create a prologue block to contain the collection expression.
+ Block = createBlock();
+ return addStmt(S->getCollection());
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) {
+ // FIXME: Add locking 'primitives' to CFG for @synchronized.
+
+ // Inline the body.
+ CFGBlock *SyncBlock = Visit(S->getSynchBody());
+
+ // The sync body starts its own basic block. This makes it a little easier
+ // for diagnostic clients.
+ if (SyncBlock) {
+ if (!FinishBlock(SyncBlock))
+ return 0;
+
+ Block = 0;
+ }
+
+ Succ = SyncBlock;
+
+ // Inline the sync expression.
+ return Visit(S->getSynchExpr());
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt* S) {
+ return NYS();
+}
+
+CFGBlock* CFGBuilder::VisitWhileStmt(WhileStmt* W) {
+ // "while" is a control-flow statement. Thus we stop processing the
+ // current block.
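+  //
+  // Illustrative sketch (not normative): for "while (C) B" the blocks
+  // built below are roughly:
+  //
+  //   [code before] -> [C] --true--> [B] --> (back edge to [C])
+  //                      \--false--> [code after the loop]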
+
+ CFGBlock* LoopSuccessor = NULL;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ }
+ else LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop
+ // can span multiple basic blocks. Thus we need the "Entry" and "Exit"
+ // blocks that evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block. Because the
+ // condition itself may contain control-flow, new blocks may be created.
+ // Thus we update "Succ" after adding the condition.
+ if (Stmt* C = W->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ assert(Block == EntryConditionBlock);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as
+ // well as any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Process the loop body.
+ {
+ assert(W->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // Create an empty block to represent the transition block for looping
+ // back to the head of the loop.
+ Block = 0;
+ assert(Succ == EntryConditionBlock);
+ Succ = createBlock();
+ Succ->setLoopTarget(W);
+ ContinueTargetBlock = Succ;
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+ // Create the body. The returned block is the entry to the loop body.
+ CFGBlock* BodyBlock = Visit(W->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "while(...) ;"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // Add the loop body entry as a successor to the condition.
+ ExitConditionBlock->addSuccessor(BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ ExitConditionBlock->addSuccessor(LoopSuccessor);
+
+ // There can be no more statements in the condition block
+ // since we loop back to this block. NULL out Block to force
+ // lazy creation of another block.
+ Block = NULL;
+
+ // Return the condition block, which is the dominating block for the loop.
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
+
+CFGBlock* CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt* S) {
+ // FIXME: This isn't complete. We basically treat @throw like a return
+ // statement.
+
+ // If we were in the middle of a block we stop processing that block
+ // and reverse its statements.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ }
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ Block->addSuccessor(&cfg->getExit());
+
+ // Add the statement to the block. This may create new blocks
+ // if S contains control-flow (short-circuit operations).
+ return addStmt(S);
+}
+
+CFGBlock* CFGBuilder::VisitDoStmt(DoStmt* D) {
+ // "do...while" is a control-flow statement. Thus we stop processing the
+ // current block.
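+  //
+  // Illustrative sketch (not normative): for "do B while (C);" the blocks
+  // built below are roughly:
+  //
+  //   [code before] -> [B] -> [C] --true--> (back edge to [B])
+  //                             \--false--> [code after the loop]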
+
+ CFGBlock* LoopSuccessor = NULL;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ LoopSuccessor = Block;
+ }
+ else LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop
+ // can span multiple basic blocks. Thus we need the "Entry" and "Exit"
+ // blocks that evaluate the condition.
+ CFGBlock* ExitConditionBlock = createBlock(false);
+ CFGBlock* EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(D);
+
+ // Now add the actual condition to the condition block. Because the
+ // condition itself may contain control-flow, new blocks may be created.
+ if (Stmt* C = D->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (!FinishBlock(EntryConditionBlock))
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body.
+ Succ = EntryConditionBlock;
+
+ // Process the loop body.
+ CFGBlock* BodyBlock = NULL;
+ {
+ assert (D->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock),
+ save_break(BreakTargetBlock);
+
+ // All continues within this loop should go to the condition block
+ ContinueTargetBlock = EntryConditionBlock;
+
+ // All breaks should go to the code following the loop.
+ BreakTargetBlock = LoopSuccessor;
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+ // Create the body. The returned block is the entry to the loop body.
+ BodyBlock = Visit(D->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
+ else if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+    // Add an intermediate block between the BodyBlock and the
+    // ExitConditionBlock to represent the "loop back" transition to the
+    // head of the loop.
+ // FIXME: Can we do this more efficiently without adding another block?
+ Block = NULL;
+ Succ = BodyBlock;
+ CFGBlock *LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(D);
+
+ // Add the loop body entry as a successor to the condition.
+ ExitConditionBlock->addSuccessor(LoopBackBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ ExitConditionBlock->addSuccessor(LoopSuccessor);
+
+ // There can be no more statements in the body block(s)
+ // since we loop back to the body. NULL out Block to force
+ // lazy creation of another block.
+ Block = NULL;
+
+ // Return the loop body, which is the dominating block for the loop.
+ Succ = BodyBlock;
+ return BodyBlock;
+}
+
+CFGBlock* CFGBuilder::VisitContinueStmt(ContinueStmt* C) {
+ // "continue" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ }
+
+ // Now create a new block that ends with the continue statement.
+ Block = createBlock(false);
+ Block->setTerminator(C);
+
+ // If there is no target for the continue, then we are looking at an
+ // incomplete AST. This means the CFG cannot be constructed.
+ if (ContinueTargetBlock)
+ Block->addSuccessor(ContinueTargetBlock);
+ else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitBreakStmt(BreakStmt* B) {
+ // "break" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ }
+
+  // Now create a new block that ends with the break statement.
+ Block = createBlock(false);
+ Block->setTerminator(B);
+
+ // If there is no target for the break, then we are looking at an
+ // incomplete AST. This means that the CFG cannot be constructed.
+ if (BreakTargetBlock)
+ Block->addSuccessor(BreakTargetBlock);
+ else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock* CFGBuilder::VisitSwitchStmt(SwitchStmt* Terminator) {
+ // "switch" is a control-flow statement. Thus we stop processing the
+ // current block.
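+  //
+  // Illustrative sketch (not normative): for "switch (E) { case 1: ...
+  // default: ... }" the block evaluating E gets one successor per 'case'
+  // label plus one for the default (or, when no 'default' is present, the
+  // code following the switch).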
+ CFGBlock* SwitchSuccessor = NULL;
+
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ SwitchSuccessor = Block;
+ }
+ else SwitchSuccessor = Succ;
+
+ // Save the current "switch" context.
+ SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
+ save_break(BreakTargetBlock),
+ save_default(DefaultCaseBlock);
+
+ // Set the "default" case to be the block after the switch statement.
+ // If the switch statement contains a "default:", this value will
+ // be overwritten with the block for that code.
+ DefaultCaseBlock = SwitchSuccessor;
+
+ // Create a new block that will contain the switch statement.
+ SwitchTerminatedBlock = createBlock(false);
+
+ // Now process the switch body. The code after the switch is the implicit
+ // successor.
+ Succ = SwitchSuccessor;
+ BreakTargetBlock = SwitchSuccessor;
+
+ // When visiting the body, the case statements should automatically get
+ // linked up to the switch. We also don't keep a pointer to the body,
+ // since all control-flow from the switch goes to case/default statements.
+ assert (Terminator->getBody() && "switch must contain a non-NULL body");
+ Block = NULL;
+ CFGBlock *BodyBlock = Visit(Terminator->getBody());
+ if (Block) {
+ if (!FinishBlock(BodyBlock))
+ return 0;
+ }
+
+ // If we have no "default:" case, the default transition is to the
+ // code following the switch body.
+ SwitchTerminatedBlock->addSuccessor(DefaultCaseBlock);
+
+ // Add the terminator and condition in the switch block.
+ SwitchTerminatedBlock->setTerminator(Terminator);
+ assert (Terminator->getCond() && "switch condition must be non-NULL");
+ Block = SwitchTerminatedBlock;
+
+ return addStmt(Terminator->getCond());
+}
+
+CFGBlock* CFGBuilder::VisitCaseStmt(CaseStmt* Terminator) {
+ // CaseStmts are essentially labels, so they are the
+ // first statement in a block.
+
+ if (Terminator->getSubStmt()) Visit(Terminator->getSubStmt());
+ CFGBlock* CaseBlock = Block;
+ if (!CaseBlock) CaseBlock = createBlock();
+
+  // Case statements partition blocks, so this is the top of
+ // the basic block we were processing (the "case XXX:" is the label).
+ CaseBlock->setLabel(Terminator);
+ if (!FinishBlock(CaseBlock))
+ return 0;
+
+ // Add this block to the list of successors for the block with the
+ // switch statement.
+ assert (SwitchTerminatedBlock);
+ SwitchTerminatedBlock->addSuccessor(CaseBlock);
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = CaseBlock;
+
+ return CaseBlock;
+}
+
+CFGBlock* CFGBuilder::VisitDefaultStmt(DefaultStmt* Terminator) {
+ if (Terminator->getSubStmt()) Visit(Terminator->getSubStmt());
+ DefaultCaseBlock = Block;
+ if (!DefaultCaseBlock) DefaultCaseBlock = createBlock();
+
+ // Default statements partition blocks, so this is the top of
+ // the basic block we were processing (the "default:" is the label).
+ DefaultCaseBlock->setLabel(Terminator);
+ if (!FinishBlock(DefaultCaseBlock))
+ return 0;
+
+ // Unlike case statements, we don't add the default block to the
+ // successors for the switch statement immediately. This is done
+ // when we finish processing the switch statement. This allows for
+ // the default case (including a fall-through to the code after the
+ // switch statement) to always be the last successor of a switch-terminated
+ // block.
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = DefaultCaseBlock;
+
+ return DefaultCaseBlock;
+}
+
+CFGBlock* CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt* I) {
+ // Lazily create the indirect-goto dispatch block if there isn't one
+ // already.
+ CFGBlock* IBlock = cfg->getIndirectGotoBlock();
+
+ if (!IBlock) {
+ IBlock = createBlock(false);
+ cfg->setIndirectGotoBlock(IBlock);
+ }
+
+ // IndirectGoto is a control-flow statement. Thus we stop processing the
+ // current block and create a new one.
+ if (Block) {
+ if (!FinishBlock(Block))
+ return 0;
+ }
+ Block = createBlock(false);
+ Block->setTerminator(I);
+ Block->addSuccessor(IBlock);
+ return addStmt(I->getTarget());
+}
+
+
+} // end anonymous namespace
+
+/// createBlock - Constructs and adds a new CFGBlock to the CFG. The
+/// block has no successors or predecessors. If this is the first block
+/// created in the CFG, it is automatically set to be the Entry and Exit
+/// of the CFG.
+CFGBlock* CFG::createBlock() {
+ bool first_block = begin() == end();
+
+ // Create the block.
+ Blocks.push_front(CFGBlock(NumBlockIDs++));
+
+ // If this is the first block, set it as the Entry and Exit.
+ if (first_block) Entry = Exit = &front();
+
+ // Return the block.
+ return &front();
+}
+
+/// buildCFG - Constructs a CFG from an AST. Ownership of the returned
+/// CFG is transferred to the caller.
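+///
+/// Typical use (illustrative sketch; 'FD' and 'Context' stand for a
+/// FunctionDecl* and an ASTContext& available in the caller):
+///   if (CFG *cfg = CFG::buildCFG(FD->getBody(Context))) {
+///     cfg->print(llvm::errs());
+///     delete cfg;
+///   }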
+CFG* CFG::buildCFG(Stmt* Statement) {
+ CFGBuilder Builder;
+ return Builder.buildCFG(Statement);
+}
+
+/// reverseStmts - Reverses the order of statements within a CFGBlock.
+void CFGBlock::reverseStmts() { std::reverse(Stmts.begin(),Stmts.end()); }
+
+//===----------------------------------------------------------------------===//
+// CFG: Queries for BlkExprs.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ typedef llvm::DenseMap<const Stmt*,unsigned> BlkExprMapTy;
+}
+
+static void FindSubExprAssignments(Stmt* Terminator, llvm::SmallPtrSet<Expr*,50>& Set) {
+ if (!Terminator)
+ return;
+
+ for (Stmt::child_iterator I=Terminator->child_begin(), E=Terminator->child_end(); I!=E; ++I) {
+ if (!*I) continue;
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(*I))
+ if (B->isAssignmentOp()) Set.insert(B);
+
+ FindSubExprAssignments(*I, Set);
+ }
+}
+
+static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) {
+ BlkExprMapTy* M = new BlkExprMapTy();
+
+ // Look for assignments that are used as subexpressions. These are the
+ // only assignments that we want to *possibly* register as a block-level
+ // expression. Basically, if an assignment occurs both in a subexpression
+ // and at the block-level, it is a block-level expression.
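+  //
+  // For example (illustrative): a statement "x = 5;" whose value is never
+  // consumed by an enclosing expression is skipped below, whereas the same
+  // assignment appearing nested inside a larger expression in some block is
+  // registered as a block-level expression.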
+ llvm::SmallPtrSet<Expr*,50> SubExprAssignments;
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I)
+ for (CFGBlock::iterator BI=I->begin(), EI=I->end(); BI != EI; ++BI)
+ FindSubExprAssignments(*BI, SubExprAssignments);
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I) {
+
+    // Iterate over the statements again to identify the Expr* and Stmt* at
+ // the block-level that are block-level expressions.
+
+ for (CFGBlock::iterator BI=I->begin(), EI=I->end(); BI != EI; ++BI)
+ if (Expr* Exp = dyn_cast<Expr>(*BI)) {
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) {
+ // Assignment expressions that are not nested within another
+ // expression are really "statements" whose value is never
+ // used by another expression.
+ if (B->isAssignmentOp() && !SubExprAssignments.count(Exp))
+ continue;
+ }
+ else if (const StmtExpr* Terminator = dyn_cast<StmtExpr>(Exp)) {
+ // Special handling for statement expressions. The last statement
+ // in the statement expression is also a block-level expr.
+ const CompoundStmt* C = Terminator->getSubStmt();
+ if (!C->body_empty()) {
+ unsigned x = M->size();
+ (*M)[C->body_back()] = x;
+ }
+ }
+
+ unsigned x = M->size();
+ (*M)[Exp] = x;
+ }
+
+ // Look at terminators. The condition is a block-level expression.
+
+ Stmt* S = I->getTerminatorCondition();
+
+ if (S && M->find(S) == M->end()) {
+ unsigned x = M->size();
+ (*M)[S] = x;
+ }
+ }
+
+ return M;
+}
+
+CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt* S) {
+ assert(S != NULL);
+ if (!BlkExprMap) { BlkExprMap = (void*) PopulateBlkExprMap(*this); }
+
+ BlkExprMapTy* M = reinterpret_cast<BlkExprMapTy*>(BlkExprMap);
+ BlkExprMapTy::iterator I = M->find(S);
+
+ if (I == M->end()) return CFG::BlkExprNumTy();
+ else return CFG::BlkExprNumTy(I->second);
+}
+
+unsigned CFG::getNumBlkExprs() {
+ if (const BlkExprMapTy* M = reinterpret_cast<const BlkExprMapTy*>(BlkExprMap))
+ return M->size();
+ else {
+ // We assume callers interested in the number of BlkExprs will want
+ // the map constructed if it doesn't already exist.
+ BlkExprMap = (void*) PopulateBlkExprMap(*this);
+ return reinterpret_cast<BlkExprMapTy*>(BlkExprMap)->size();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup: CFG destructor.
+//===----------------------------------------------------------------------===//
+
+CFG::~CFG() {
+ delete reinterpret_cast<const BlkExprMapTy*>(BlkExprMap);
+}
+
+//===----------------------------------------------------------------------===//
+// CFG pretty printing
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN StmtPrinterHelper : public PrinterHelper {
+
+ typedef llvm::DenseMap<Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
+ StmtMapTy StmtMap;
+ signed CurrentBlock;
+ unsigned CurrentStmt;
+
+public:
+
+ StmtPrinterHelper(const CFG* cfg) : CurrentBlock(0), CurrentStmt(0) {
+ for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
+ unsigned j = 1;
+ for (CFGBlock::const_iterator BI = I->begin(), BEnd = I->end() ;
+ BI != BEnd; ++BI, ++j )
+ StmtMap[*BI] = std::make_pair(I->getBlockID(),j);
+ }
+ }
+
+ virtual ~StmtPrinterHelper() {}
+
+ void setBlockID(signed i) { CurrentBlock = i; }
+ void setStmtID(unsigned i) { CurrentStmt = i; }
+
+ virtual bool handledStmt(Stmt* Terminator, llvm::raw_ostream& OS) {
+
+ StmtMapTy::iterator I = StmtMap.find(Terminator);
+
+ if (I == StmtMap.end())
+ return false;
+
+ if (CurrentBlock >= 0 && I->second.first == (unsigned) CurrentBlock
+ && I->second.second == CurrentStmt)
+ return false;
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+};
+
+class VISIBILITY_HIDDEN CFGBlockTerminatorPrint
+ : public StmtVisitor<CFGBlockTerminatorPrint,void> {
+
+ llvm::raw_ostream& OS;
+ StmtPrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+public:
+ CFGBlockTerminatorPrint(llvm::raw_ostream& os, StmtPrinterHelper* helper,
+ const PrintingPolicy &Policy = PrintingPolicy())
+ : OS(os), Helper(helper), Policy(Policy) {}
+
+ void VisitIfStmt(IfStmt* I) {
+ OS << "if ";
+ I->getCond()->printPretty(OS,Helper,Policy);
+ }
+
+ // Default case.
+ void VisitStmt(Stmt* Terminator) { Terminator->printPretty(OS, Helper, Policy); }
+
+ void VisitForStmt(ForStmt* F) {
+ OS << "for (" ;
+ if (F->getInit()) OS << "...";
+ OS << "; ";
+ if (Stmt* C = F->getCond()) C->printPretty(OS, Helper, Policy);
+ OS << "; ";
+ if (F->getInc()) OS << "...";
+ OS << ")";
+ }
+
+ void VisitWhileStmt(WhileStmt* W) {
+ OS << "while " ;
+ if (Stmt* C = W->getCond()) C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitDoStmt(DoStmt* D) {
+ OS << "do ... while ";
+ if (Stmt* C = D->getCond()) C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitSwitchStmt(SwitchStmt* Terminator) {
+ OS << "switch ";
+ Terminator->getCond()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitConditionalOperator(ConditionalOperator* C) {
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " ? ... : ...";
+ }
+
+ void VisitChooseExpr(ChooseExpr* C) {
+ OS << "__builtin_choose_expr( ";
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " )";
+ }
+
+ void VisitIndirectGotoStmt(IndirectGotoStmt* I) {
+ OS << "goto *";
+ I->getTarget()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitBinaryOperator(BinaryOperator* B) {
+ if (!B->isLogicalOp()) {
+ VisitExpr(B);
+ return;
+ }
+
+ B->getLHS()->printPretty(OS, Helper, Policy);
+
+ switch (B->getOpcode()) {
+ case BinaryOperator::LOr:
+ OS << " || ...";
+ return;
+ case BinaryOperator::LAnd:
+ OS << " && ...";
+ return;
+ default:
+ assert(false && "Invalid logical operator.");
+ }
+ }
+
+ void VisitExpr(Expr* E) {
+ E->printPretty(OS, Helper, Policy);
+ }
+};
+
+
+void print_stmt(llvm::raw_ostream& OS, StmtPrinterHelper* Helper, Stmt* Terminator) {
+ if (Helper) {
+ // special printing for statement-expressions.
+ if (StmtExpr* SE = dyn_cast<StmtExpr>(Terminator)) {
+ CompoundStmt* Sub = SE->getSubStmt();
+
+ if (Sub->child_begin() != Sub->child_end()) {
+ OS << "({ ... ; ";
+ Helper->handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
+ OS << " })\n";
+ return;
+ }
+ }
+
+ // special printing for comma expressions.
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(Terminator)) {
+ if (B->getOpcode() == BinaryOperator::Comma) {
+ OS << "... , ";
+ Helper->handledStmt(B->getRHS(),OS);
+ OS << '\n';
+ return;
+ }
+ }
+ }
+
+ Terminator->printPretty(OS, Helper, /*FIXME:*/PrintingPolicy());
+
+ // Expressions need a newline.
+ if (isa<Expr>(Terminator)) OS << '\n';
+}
+
+void print_block(llvm::raw_ostream& OS, const CFG* cfg, const CFGBlock& B,
+ StmtPrinterHelper* Helper, bool print_edges) {
+
+ if (Helper) Helper->setBlockID(B.getBlockID());
+
+ // Print the header.
+ OS << "\n [ B" << B.getBlockID();
+
+ if (&B == &cfg->getEntry())
+ OS << " (ENTRY) ]\n";
+ else if (&B == &cfg->getExit())
+ OS << " (EXIT) ]\n";
+ else if (&B == cfg->getIndirectGotoBlock())
+ OS << " (INDIRECT GOTO DISPATCH) ]\n";
+ else
+ OS << " ]\n";
+
+ // Print the label of this block.
+ if (Stmt* Terminator = const_cast<Stmt*>(B.getLabel())) {
+
+ if (print_edges)
+ OS << " ";
+
+ if (LabelStmt* L = dyn_cast<LabelStmt>(Terminator))
+ OS << L->getName();
+ else if (CaseStmt* C = dyn_cast<CaseStmt>(Terminator)) {
+ OS << "case ";
+ C->getLHS()->printPretty(OS, Helper, /*FIXME:*/PrintingPolicy());
+ if (C->getRHS()) {
+ OS << " ... ";
+ C->getRHS()->printPretty(OS, Helper, /*FIXME:*/PrintingPolicy());
+ }
+ }
+ else if (isa<DefaultStmt>(Terminator))
+ OS << "default";
+ else
+ assert(false && "Invalid label statement in CFGBlock.");
+
+ OS << ":\n";
+ }
+
+ // Iterate through the statements in the block and print them.
+ unsigned j = 1;
+
+ for (CFGBlock::const_iterator I = B.begin(), E = B.end() ;
+ I != E ; ++I, ++j ) {
+
+ // Print the statement # in the basic block and the statement itself.
+ if (print_edges)
+ OS << " ";
+
+ OS << llvm::format("%3d", j) << ": ";
+
+ if (Helper)
+ Helper->setStmtID(j);
+
+ print_stmt(OS,Helper,*I);
+ }
+
+ // Print the terminator of this block.
+ if (B.getTerminator()) {
+ if (print_edges)
+ OS << " ";
+
+ OS << " T: ";
+
+ if (Helper) Helper->setBlockID(-1);
+
+ CFGBlockTerminatorPrint TPrinter(OS, Helper, /*FIXME*/PrintingPolicy());
+ TPrinter.Visit(const_cast<Stmt*>(B.getTerminator()));
+ OS << '\n';
+ }
+
+ if (print_edges) {
+ // Print the predecessors of this block.
+ OS << " Predecessors (" << B.pred_size() << "):";
+ unsigned i = 0;
+
+ for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
+ I != E; ++I, ++i) {
+
+      if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ OS << " B" << (*I)->getBlockID();
+ }
+
+ OS << '\n';
+
+ // Print the successors of this block.
+ OS << " Successors (" << B.succ_size() << "):";
+ i = 0;
+
+ for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
+ I != E; ++I, ++i) {
+
+ if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ OS << " B" << (*I)->getBlockID();
+ }
+
+ OS << '\n';
+ }
+}
+
+} // end anonymous namespace
+
+/// dump - A simple pretty printer of a CFG that outputs to stderr.
+void CFG::dump() const { print(llvm::errs()); }
+
+/// print - A simple pretty printer of a CFG that outputs to an ostream.
+void CFG::print(llvm::raw_ostream& OS) const {
+
+ StmtPrinterHelper Helper(this);
+
+ // Print the entry block.
+ print_block(OS, this, getEntry(), &Helper, true);
+
+ // Iterate through the CFGBlocks and print them one by one.
+ for (const_iterator I = Blocks.begin(), E = Blocks.end() ; I != E ; ++I) {
+ // Skip the entry block, because we already printed it.
+ if (&(*I) == &getEntry() || &(*I) == &getExit())
+ continue;
+
+ print_block(OS, this, *I, &Helper, true);
+ }
+
+ // Print the exit block.
+ print_block(OS, this, getExit(), &Helper, true);
+ OS.flush();
+}
+
+/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
+void CFGBlock::dump(const CFG* cfg) const { print(llvm::errs(), cfg); }
+
+/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
+/// Generally this will only be called from CFG::print.
+void CFGBlock::print(llvm::raw_ostream& OS, const CFG* cfg) const {
+ StmtPrinterHelper Helper(cfg);
+ print_block(OS, cfg, *this, &Helper, true);
+}
+
+/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
+void CFGBlock::printTerminator(llvm::raw_ostream& OS) const {
+ CFGBlockTerminatorPrint TPrinter(OS,NULL);
+ TPrinter.Visit(const_cast<Stmt*>(getTerminator()));
+}
+
+Stmt* CFGBlock::getTerminatorCondition() {
+
+ if (!Terminator)
+ return NULL;
+
+ Expr* E = NULL;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::ForStmtClass:
+ E = cast<ForStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::WhileStmtClass:
+ E = cast<WhileStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::DoStmtClass:
+ E = cast<DoStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::IfStmtClass:
+ E = cast<IfStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ChooseExprClass:
+ E = cast<ChooseExpr>(Terminator)->getCond();
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ E = cast<IndirectGotoStmt>(Terminator)->getTarget();
+ break;
+
+ case Stmt::SwitchStmtClass:
+ E = cast<SwitchStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ConditionalOperatorClass:
+ E = cast<ConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ E = cast<BinaryOperator>(Terminator)->getLHS();
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return Terminator;
+ }
+
+ return E ? E->IgnoreParens() : NULL;
+}
+
+bool CFGBlock::hasBinaryBranchTerminator() const {
+
+ if (!Terminator)
+ return false;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ return false;
+
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::IfStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryOperatorClass:
+ return true;
+ }
+
+  return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// CFG Graphviz Visualization
+//===----------------------------------------------------------------------===//
+
+
+#ifndef NDEBUG
+static StmtPrinterHelper* GraphHelper;
+#endif
+
+void CFG::viewCFG() const {
+#ifndef NDEBUG
+ StmtPrinterHelper H(this);
+ GraphHelper = &H;
+ llvm::ViewGraph(this,"CFG");
+ GraphHelper = NULL;
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
+ static std::string getNodeLabel(const CFGBlock* Node, const CFG* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+ print_block(Out,Graph, *Node, GraphHelper, false);
+ std::string& OutStr = Out.str();
+
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
new file mode 100644
index 0000000..19ab9f6
--- /dev/null
+++ b/lib/AST/CMakeLists.txt
@@ -0,0 +1,32 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangAST
+ APValue.cpp
+ ASTConsumer.cpp
+ ASTContext.cpp
+ Builtins.cpp
+ CFG.cpp
+ DeclarationName.cpp
+ DeclBase.cpp
+ Decl.cpp
+ DeclCXX.cpp
+ DeclGroup.cpp
+ DeclObjC.cpp
+ DeclPrinter.cpp
+ DeclTemplate.cpp
+ ExprConstant.cpp
+ Expr.cpp
+ ExprCXX.cpp
+ InheritViz.cpp
+ NestedNameSpecifier.cpp
+ ParentMap.cpp
+ Stmt.cpp
+ StmtDumper.cpp
+ StmtIterator.cpp
+ StmtPrinter.cpp
+ StmtViz.cpp
+ TemplateName.cpp
+ Type.cpp
+ )
+
+add_dependencies(clangAST ClangDiagnosticAST)
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
new file mode 100644
index 0000000..cb3ec1f
--- /dev/null
+++ b/lib/AST/Decl.cpp
@@ -0,0 +1,630 @@
+//===--- Decl.cpp - Declaration AST Node Implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/IdentifierTable.h"
+#include <vector>
+
+using namespace clang;
+
+void Attr::Destroy(ASTContext &C) {
+ if (Next) {
+ Next->Destroy(C);
+ Next = 0;
+ }
+ this->~Attr();
+ C.Deallocate((void*)this);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+
+TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
+ return new (C) TranslationUnitDecl();
+}
+
+NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id) {
+ return new (C) NamespaceDecl(DC, L, Id);
+}
+
+void NamespaceDecl::Destroy(ASTContext& C) {
+ // NamespaceDecl uses "NextDeclarator" to chain namespace declarations
+ // together. They are all top-level Decls.
+
+ this->~NamespaceDecl();
+ C.Deallocate((void *)this);
+}
+
+
+ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id, QualType T) {
+ return new (C) ImplicitParamDecl(ImplicitParam, DC, L, Id, T);
+}
+
+const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
+ switch (SC) {
+ case VarDecl::None: break;
+  case VarDecl::Auto: return "auto";
+  case VarDecl::Extern: return "extern";
+  case VarDecl::PrivateExtern: return "__private_extern__";
+  case VarDecl::Register: return "register";
+  case VarDecl::Static: return "static";
+ }
+
+ assert(0 && "Invalid storage class");
+ return 0;
+}
+
+ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, StorageClass S,
+ Expr *DefArg) {
+ return new (C) ParmVarDecl(ParmVar, DC, L, Id, T, S, DefArg);
+}
+
+QualType ParmVarDecl::getOriginalType() const {
+ if (const OriginalParmVarDecl *PVD =
+ dyn_cast<OriginalParmVarDecl>(this))
+ return PVD->OriginalType;
+ return getType();
+}
+
+void VarDecl::setInit(ASTContext &C, Expr *I) {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
+ Eval->~EvaluatedStmt();
+ C.Deallocate(Eval);
+ }
+
+ Init = I;
+}
+
+bool VarDecl::isExternC(ASTContext &Context) const {
+ if (!Context.getLangOptions().CPlusPlus)
+ return (getDeclContext()->isTranslationUnit() &&
+ getStorageClass() != Static) ||
+ (getDeclContext()->isFunctionOrMethod() && hasExternalStorage());
+
+ for (const DeclContext *DC = getDeclContext(); !DC->isTranslationUnit();
+ DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC)) {
+ if (Linkage->getLanguage() == LinkageSpecDecl::lang_c)
+ return getStorageClass() != Static;
+
+ break;
+ }
+
+ if (DC->isFunctionOrMethod())
+ return false;
+ }
+
+ return false;
+}
+
+OriginalParmVarDecl *OriginalParmVarDecl::Create(
+ ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, QualType OT, StorageClass S,
+ Expr *DefArg) {
+ return new (C) OriginalParmVarDecl(DC, L, Id, T, OT, S, DefArg);
+}
+
+FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ DeclarationName N, QualType T,
+ StorageClass S, bool isInline,
+ bool hasWrittenPrototype,
+ SourceLocation TypeSpecStartLoc) {
+ FunctionDecl *New
+ = new (C) FunctionDecl(Function, DC, L, N, T, S, isInline,
+ TypeSpecStartLoc);
+ New->HasWrittenPrototype = hasWrittenPrototype;
+ return New;
+}
+
+BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
+ return new (C) BlockDecl(DC, L);
+}
+
+FieldDecl *FieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, Expr *BW,
+ bool Mutable) {
+ return new (C) FieldDecl(Decl::Field, DC, L, Id, T, BW, Mutable);
+}
+
+bool FieldDecl::isAnonymousStructOrUnion() const {
+ if (!isImplicit() || getDeclName())
+ return false;
+
+ if (const RecordType *Record = getType()->getAsRecordType())
+ return Record->getDecl()->isAnonymousStructOrUnion();
+
+ return false;
+}
+
+EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
+ SourceLocation L,
+ IdentifierInfo *Id, QualType T,
+ Expr *E, const llvm::APSInt &V) {
+ return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
+}
+
+void EnumConstantDecl::Destroy(ASTContext& C) {
+ if (Init) Init->Destroy(C);
+ Decl::Destroy(C);
+}
+
+TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id, QualType T) {
+ return new (C) TypedefDecl(DC, L, Id, T);
+}
+
+EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id,
+ EnumDecl *PrevDecl) {
+ EnumDecl *Enum = new (C) EnumDecl(DC, L, Id);
+ C.getTypeDeclType(Enum, PrevDecl);
+ return Enum;
+}
+
+void EnumDecl::Destroy(ASTContext& C) {
+ Decl::Destroy(C);
+}
+
+void EnumDecl::completeDefinition(ASTContext &C, QualType NewType) {
+ assert(!isDefinition() && "Cannot redefine enums!");
+ IntegerType = NewType;
+ TagDecl::completeDefinition();
+}
+
+FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ StringLiteral *Str) {
+ return new (C) FileScopeAsmDecl(DC, L, Str);
+}
+
+//===----------------------------------------------------------------------===//
+// NamedDecl Implementation
+//===----------------------------------------------------------------------===//
+
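+/// getQualifiedNameAsString - Build the fully qualified name of this
+/// declaration by walking its enclosing contexts; e.g. (illustrative) a
+/// member f of class C in namespace N yields "N::C::f". Class template
+/// specializations also include their template arguments.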
+std::string NamedDecl::getQualifiedNameAsString() const {
+ std::vector<std::string> Names;
+ std::string QualName;
+ const DeclContext *Ctx = getDeclContext();
+
+ if (Ctx->isFunctionOrMethod())
+ return getNameAsString();
+
+ while (Ctx) {
+ if (Ctx->isFunctionOrMethod())
+      // FIXME: This can happen when D is a member of a local
+      // class/struct/union. How do we handle this case?
+ break;
+
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ PrintingPolicy Policy;
+ Policy.CPlusPlus = true;
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.getFlatArgumentList(),
+ TemplateArgs.flat_size(),
+ Policy);
+ Names.push_back(Spec->getIdentifier()->getName() + TemplateArgsStr);
+ } else if (const NamedDecl *ND = dyn_cast<NamedDecl>(Ctx))
+ Names.push_back(ND->getNameAsString());
+ else
+ break;
+
+ Ctx = Ctx->getParent();
+ }
+
+ std::vector<std::string>::reverse_iterator
+ I = Names.rbegin(),
+ End = Names.rend();
+
+ for (; I!=End; ++I)
+ QualName += *I + "::";
+
+ QualName += getNameAsString();
+
+ return QualName;
+}
+
+
+bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
+ assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
+
+  // UsingDirectiveDecls are not really NamedDecls; they all share the same
+  // name. We want to keep the new declaration unless it nominates the same
+  // namespace as the old one.
+ if (getKind() == Decl::UsingDirective) {
+ return cast<UsingDirectiveDecl>(this)->getNominatedNamespace() ==
+ cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace();
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ // For function declarations, we keep track of redeclarations.
+ return FD->getPreviousDeclaration() == OldD;
+
+ // For method declarations, we keep track of redeclarations.
+ if (isa<ObjCMethodDecl>(this))
+ return false;
+
+ // For non-function declarations, if the declarations are of the
+ // same kind then this must be a redeclaration, or semantic analysis
+ // would not have given us the new declaration.
+ return this->getKind() == OldD->getKind();
+}
+
+bool NamedDecl::hasLinkage() const {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(this))
+ return VD->hasExternalStorage() || VD->isFileVarDecl();
+
+ if (isa<FunctionDecl>(this) && !isa<CXXMethodDecl>(this))
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// VarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, StorageClass S,
+ SourceLocation TypeSpecStartLoc) {
+ return new (C) VarDecl(Var, DC, L, Id, T, S, TypeSpecStartLoc);
+}
+
+void VarDecl::Destroy(ASTContext& C) {
+ Expr *Init = getInit();
+ if (Init) {
+ Init->Destroy(C);
+ if (EvaluatedStmt *Eval = this->Init.dyn_cast<EvaluatedStmt *>()) {
+ Eval->~EvaluatedStmt();
+ C.Deallocate(Eval);
+ }
+ }
+ this->~VarDecl();
+ C.Deallocate((void *)this);
+}
+
+VarDecl::~VarDecl() {
+}
+
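+/// isTentativeDefinition - Determine whether this declaration is a C
+/// tentative definition, e.g. (illustrative) a file-scope "int x;" or
+/// "static int x;" with no initializer.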
+bool VarDecl::isTentativeDefinition(ASTContext &Context) const {
+ if (!isFileVarDecl() || Context.getLangOptions().CPlusPlus)
+ return false;
+
+ const VarDecl *Def = 0;
+ return (!getDefinition(Def) &&
+ (getStorageClass() == None || getStorageClass() == Static));
+}
+
+const Expr *VarDecl::getDefinition(const VarDecl *&Def) const {
+ Def = this;
+ while (Def && !Def->getInit())
+ Def = Def->getPreviousDeclaration();
+
+ return Def? Def->getInit() : 0;
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionDecl::Destroy(ASTContext& C) {
+ if (Body && Body.isOffset())
+ Body.get(C.getExternalSource())->Destroy(C);
+
+ for (param_iterator I=param_begin(), E=param_end(); I!=E; ++I)
+ (*I)->Destroy(C);
+
+ C.Deallocate(ParamInfo);
+
+ Decl::Destroy(C);
+}
+
+
+Stmt *FunctionDecl::getBody(ASTContext &Context,
+ const FunctionDecl *&Definition) const {
+ for (const FunctionDecl *FD = this; FD != 0; FD = FD->PreviousDeclaration) {
+ if (FD->Body) {
+ Definition = FD;
+ return FD->Body.get(Context.getExternalSource());
+ }
+ }
+
+ return 0;
+}
+
+Stmt *FunctionDecl::getBodyIfAvailable() const {
+ for (const FunctionDecl *FD = this; FD != 0; FD = FD->PreviousDeclaration) {
+ if (FD->Body && !FD->Body.isOffset()) {
+ return FD->Body.get(0);
+ }
+ }
+
+ return 0;
+}
+
+bool FunctionDecl::isMain() const {
+ return getDeclContext()->getLookupContext()->isTranslationUnit() &&
+ getIdentifier() && getIdentifier()->isStr("main");
+}
+
+bool FunctionDecl::isExternC(ASTContext &Context) const {
+ // In C, any non-static, non-overloadable function has external
+ // linkage.
+ if (!Context.getLangOptions().CPlusPlus)
+ return getStorageClass() != Static && !getAttr<OverloadableAttr>();
+
+ for (const DeclContext *DC = getDeclContext(); !DC->isTranslationUnit();
+ DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC)) {
+ if (Linkage->getLanguage() == LinkageSpecDecl::lang_c)
+ return getStorageClass() != Static && !getAttr<OverloadableAttr>();
+
+ break;
+ }
+ }
+
+ return false;
+}
+
+bool FunctionDecl::isGlobal() const {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
+ return Method->isStatic();
+
+ if (getStorageClass() == Static)
+ return false;
+
+ for (const DeclContext *DC = getDeclContext();
+ DC->isNamespace();
+ DC = DC->getParent()) {
+ if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
+ if (!Namespace->getDeclName())
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Returns a value indicating whether this function
+/// corresponds to a builtin function.
+///
+/// The function corresponds to a built-in function if it is
+/// declared at translation-unit scope or within an extern "C" block and
+/// its name matches the name of a builtin. The returned value
+/// will be 0 for functions that do not correspond to a builtin, a
+/// value of type \c Builtin::ID if in the target-independent range
+/// \c [1,Builtin::First), or a target-specific builtin value.
+unsigned FunctionDecl::getBuiltinID(ASTContext &Context) const {
+ if (!getIdentifier() || !getIdentifier()->getBuiltinID())
+ return 0;
+
+ unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return BuiltinID;
+
+ // This function has the name of a known C library
+ // function. Determine whether it actually refers to the C library
+ // function or whether it just has the same name.
+
+ // If this is a static function, it's not a builtin.
+ if (getStorageClass() == Static)
+ return 0;
+
+ // If this function is at translation-unit scope and we're not in
+ // C++, it refers to the C library function.
+ if (!Context.getLangOptions().CPlusPlus &&
+ getDeclContext()->isTranslationUnit())
+ return BuiltinID;
+
+ // If the function is in an extern "C" linkage specification and is
+ // not marked "overloadable", it's the real function.
+ if (isa<LinkageSpecDecl>(getDeclContext()) &&
+ cast<LinkageSpecDecl>(getDeclContext())->getLanguage()
+ == LinkageSpecDecl::lang_c &&
+ !getAttr<OverloadableAttr>())
+ return BuiltinID;
+
+ // Not a builtin
+ return 0;
+}
+
+
+/// getNumParams - Return the number of parameters this function must have
+/// based on its FunctionType. This is the length of the ParamInfo array
+/// after it has been created.
+unsigned FunctionDecl::getNumParams() const {
+ const FunctionType *FT = getType()->getAsFunctionType();
+ if (isa<FunctionNoProtoType>(FT))
+ return 0;
+ return cast<FunctionProtoType>(FT)->getNumArgs();
+}
+
+void FunctionDecl::setParams(ASTContext& C, ParmVarDecl **NewParamInfo,
+ unsigned NumParams) {
+ assert(ParamInfo == 0 && "Already has param info!");
+ assert(NumParams == getNumParams() && "Parameter count mismatch!");
+
+ // Zero params -> null pointer.
+ if (NumParams) {
+ void *Mem = C.Allocate(sizeof(ParmVarDecl*)*NumParams);
+ ParamInfo = new (Mem) ParmVarDecl*[NumParams];
+ memcpy(ParamInfo, NewParamInfo, sizeof(ParmVarDecl*)*NumParams);
+ }
+}
+
+/// getMinRequiredArguments - Returns the minimum number of arguments
+/// needed to call this function. This may be fewer than the number of
+/// function parameters, if some of the parameters have default
+/// arguments (in C++).
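+///
+/// For example (illustrative): for "void f(int a, int b = 0)" this returns
+/// 1, while getNumParams() returns 2.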
+unsigned FunctionDecl::getMinRequiredArguments() const {
+ unsigned NumRequiredArgs = getNumParams();
+ while (NumRequiredArgs > 0
+ && getParamDecl(NumRequiredArgs-1)->getDefaultArg())
+ --NumRequiredArgs;
+
+ return NumRequiredArgs;
+}
+
+bool FunctionDecl::hasActiveGNUInlineAttribute() const {
+ if (!isInline() || !hasAttr<GNUInlineAttr>())
+ return false;
+
+ for (const FunctionDecl *FD = getPreviousDeclaration(); FD;
+ FD = FD->getPreviousDeclaration()) {
+ if (FD->isInline() && !FD->hasAttr<GNUInlineAttr>())
+ return false;
+ }
+
+ return true;
+}
+
+bool FunctionDecl::isExternGNUInline() const {
+ if (!hasActiveGNUInlineAttribute())
+ return false;
+
+ for (const FunctionDecl *FD = this; FD; FD = FD->getPreviousDeclaration())
+ if (FD->getStorageClass() == Extern && FD->hasAttr<GNUInlineAttr>())
+ return true;
+
+ return false;
+}
+
+/// getOverloadedOperator - Which C++ overloaded operator this
+/// function represents, if any.
+OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return getDeclName().getCXXOverloadedOperator();
+ else
+ return OO_None;
+}
+
+//===----------------------------------------------------------------------===//
+// TagDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void TagDecl::startDefinition() {
+ TagType *TagT = const_cast<TagType *>(TypeForDecl->getAsTagType());
+ TagT->decl.setPointer(this);
+  TagT->decl.setInt(1);
+}
+
+void TagDecl::completeDefinition() {
+ assert((!TypeForDecl ||
+ TypeForDecl->getAsTagType()->decl.getPointer() == this) &&
+ "Attempt to redefine a tag definition?");
+ IsDefinition = true;
+ TagType *TagT = const_cast<TagType *>(TypeForDecl->getAsTagType());
+ TagT->decl.setPointer(this);
+ TagT->decl.setInt(0);
+}
+
+TagDecl* TagDecl::getDefinition(ASTContext& C) const {
+ QualType T = C.getTypeDeclType(const_cast<TagDecl*>(this));
+ TagDecl* D = cast<TagDecl>(T->getAsTagType()->getDecl());
+ return D->isDefinition() ? D : 0;
+}
+
+//===----------------------------------------------------------------------===//
+// RecordDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RecordDecl::RecordDecl(Kind DK, TagKind TK, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id)
+ : TagDecl(DK, TK, DC, L, Id) {
+ HasFlexibleArrayMember = false;
+ AnonymousStructOrUnion = false;
+ assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+}
+
+RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ RecordDecl* PrevDecl) {
+
+ RecordDecl* R = new (C) RecordDecl(Record, TK, DC, L, Id);
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+RecordDecl::~RecordDecl() {
+}
+
+void RecordDecl::Destroy(ASTContext& C) {
+ TagDecl::Destroy(C);
+}
+
+bool RecordDecl::isInjectedClassName() const {
+ return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
+ cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
+}
+
+/// completeDefinition - Notes that the definition of this type is now
+/// complete.
+void RecordDecl::completeDefinition(ASTContext& C) {
+ assert(!isDefinition() && "Cannot redefine record!");
+ TagDecl::completeDefinition();
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDecl Implementation
+//===----------------------------------------------------------------------===//
+
+BlockDecl::~BlockDecl() {
+}
+
+void BlockDecl::Destroy(ASTContext& C) {
+ if (Body)
+ Body->Destroy(C);
+
+ for (param_iterator I=param_begin(), E=param_end(); I!=E; ++I)
+ (*I)->Destroy(C);
+
+ C.Deallocate(ParamInfo);
+ Decl::Destroy(C);
+}
+
+void BlockDecl::setParams(ASTContext& C, ParmVarDecl **NewParamInfo,
+ unsigned NParms) {
+ assert(ParamInfo == 0 && "Already has param info!");
+
+ // Zero params -> null pointer.
+ if (NParms) {
+ NumParams = NParms;
+ void *Mem = C.Allocate(sizeof(ParmVarDecl*)*NumParams);
+ ParamInfo = new (Mem) ParmVarDecl*[NumParams];
+ memcpy(ParamInfo, NewParamInfo, sizeof(ParmVarDecl*)*NumParams);
+ }
+}
+
+unsigned BlockDecl::getNumParams() const {
+ return NumParams;
+}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
new file mode 100644
index 0000000..fd7de71
--- /dev/null
+++ b/lib/AST/DeclBase.cpp
@@ -0,0 +1,756 @@
+//===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl and DeclContext classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstdio>
+#include <vector>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statistics
+//===----------------------------------------------------------------------===//
+
+#define DECL(Derived, Base) static int n##Derived##s = 0;
+#include "clang/AST/DeclNodes.def"
+
+static bool StatSwitch = false;
+
+// This keeps track of all decl attributes. Since so few decls have attrs, we
+// keep them in a hash map instead of wasting space in the Decl class.
+typedef llvm::DenseMap<const Decl*, Attr*> DeclAttrMapTy;
+
+static DeclAttrMapTy *DeclAttrs = 0;
+
+const char *Decl::getDeclKindName() const {
+ switch (DeclKind) {
+ default: assert(0 && "Declaration not in DeclNodes.def!");
+#define DECL(Derived, Base) case Derived: return #Derived;
+#include "clang/AST/DeclNodes.def"
+ }
+}
+
+const char *DeclContext::getDeclKindName() const {
+ switch (DeclKind) {
+ default: assert(0 && "Declaration context not in DeclNodes.def!");
+#define DECL(Derived, Base) case Decl::Derived: return #Derived;
+#include "clang/AST/DeclNodes.def"
+ }
+}
+
+bool Decl::CollectingStats(bool Enable) {
+ if (Enable)
+ StatSwitch = true;
+ return StatSwitch;
+}
+
+void Decl::PrintStats() {
+ fprintf(stderr, "*** Decl Stats:\n");
+
+ int totalDecls = 0;
+#define DECL(Derived, Base) totalDecls += n##Derived##s;
+#include "clang/AST/DeclNodes.def"
+ fprintf(stderr, " %d decls total.\n", totalDecls);
+
+ int totalBytes = 0;
+#define DECL(Derived, Base) \
+ if (n##Derived##s > 0) { \
+ totalBytes += (int)(n##Derived##s * sizeof(Derived##Decl)); \
+ fprintf(stderr, " %d " #Derived " decls, %d each (%d bytes)\n", \
+ n##Derived##s, (int)sizeof(Derived##Decl), \
+ (int)(n##Derived##s * sizeof(Derived##Decl))); \
+ }
+#include "clang/AST/DeclNodes.def"
+
+ fprintf(stderr, "Total bytes = %d\n", totalBytes);
+}
+
+void Decl::addDeclKind(Kind k) {
+ switch (k) {
+ default: assert(0 && "Declaration not in DeclNodes.def!");
+#define DECL(Derived, Base) case Derived: ++n##Derived##s; break;
+#include "clang/AST/DeclNodes.def"
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceDecl::print(llvm::raw_ostream &OS) const {
+ SourceLocation TheLoc = Loc;
+ if (TheLoc.isInvalid() && TheDecl)
+ TheLoc = TheDecl->getLocation();
+
+ if (TheLoc.isValid()) {
+ TheLoc.print(OS, SM);
+ OS << ": ";
+ }
+
+ OS << Message;
+
+ if (NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl))
+ OS << " '" << DN->getQualifiedNameAsString() << '\'';
+ OS << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// Decl Implementation
+//===----------------------------------------------------------------------===//
+
+// Out-of-line virtual method providing a home for Decl.
+Decl::~Decl() {
+ if (isOutOfSemaDC())
+ delete getMultipleDC();
+
+ assert(!HasAttrs && "attributes should have been freed by Destroy");
+}
+
+void Decl::setDeclContext(DeclContext *DC) {
+ if (isOutOfSemaDC())
+ delete getMultipleDC();
+
+ DeclCtx = DC;
+}
+
+void Decl::setLexicalDeclContext(DeclContext *DC) {
+ if (DC == getLexicalDeclContext())
+ return;
+
+ if (isInSemaDC()) {
+ MultipleDC *MDC = new MultipleDC();
+ MDC->SemanticDC = getDeclContext();
+ MDC->LexicalDC = DC;
+ DeclCtx = MDC;
+ } else {
+ getMultipleDC()->LexicalDC = DC;
+ }
+}
+
+unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
+ switch (DeclKind) {
+ default:
+ if (DeclKind >= FunctionFirst && DeclKind <= FunctionLast)
+ return IDNS_Ordinary;
+ assert(0 && "Unknown decl kind!");
+ case OverloadedFunction:
+ case Typedef:
+ case EnumConstant:
+ case Var:
+ case ImplicitParam:
+ case ParmVar:
+ case OriginalParmVar:
+ case NonTypeTemplateParm:
+ case ObjCMethod:
+ case ObjCContainer:
+ case ObjCCategory:
+ case ObjCInterface:
+ case ObjCProperty:
+ case ObjCCompatibleAlias:
+ return IDNS_Ordinary;
+
+ case ObjCProtocol:
+ return IDNS_ObjCProtocol;
+
+ case ObjCImplementation:
+ return IDNS_ObjCImplementation;
+
+ case ObjCCategoryImpl:
+ return IDNS_ObjCCategoryImpl;
+
+ case Field:
+ case ObjCAtDefsField:
+ case ObjCIvar:
+ return IDNS_Member;
+
+ case Record:
+ case CXXRecord:
+ case Enum:
+ case TemplateTypeParm:
+ return IDNS_Tag;
+
+ case Namespace:
+ case Template:
+ case FunctionTemplate:
+ case ClassTemplate:
+ case TemplateTemplateParm:
+ case NamespaceAlias:
+ return IDNS_Tag | IDNS_Ordinary;
+
+ // Never have names.
+ case LinkageSpec:
+ case FileScopeAsm:
+ case StaticAssert:
+ case ObjCClass:
+ case ObjCPropertyImpl:
+ case ObjCForwardProtocol:
+ case Block:
+ case TranslationUnit:
+
+ // Aren't looked up?
+ case UsingDirective:
+ case ClassTemplateSpecialization:
+ case ClassTemplatePartialSpecialization:
+ return 0;
+ }
+}
+
+void Decl::addAttr(Attr *NewAttr) {
+ if (!DeclAttrs)
+ DeclAttrs = new DeclAttrMapTy();
+
+ Attr *&ExistingAttr = (*DeclAttrs)[this];
+
+ NewAttr->setNext(ExistingAttr);
+ ExistingAttr = NewAttr;
+
+ HasAttrs = true;
+}
+
+void Decl::invalidateAttrs() {
+ if (!HasAttrs) return;
+
+ HasAttrs = false;
+ (*DeclAttrs)[this] = 0;
+ DeclAttrs->erase(this);
+
+ if (DeclAttrs->empty()) {
+ delete DeclAttrs;
+ DeclAttrs = 0;
+ }
+}
+
+const Attr *Decl::getAttrsImpl() const {
+ assert(HasAttrs && "getAttrs() should verify this!");
+ return (*DeclAttrs)[this];
+}
+
+void Decl::swapAttrs(Decl *RHS) {
+ bool HasLHSAttr = this->HasAttrs;
+ bool HasRHSAttr = RHS->HasAttrs;
+
+ // Usually, neither decl has attrs, nothing to do.
+ if (!HasLHSAttr && !HasRHSAttr) return;
+
+ // If 'this' has no attrs, swap the other way.
+ if (!HasLHSAttr)
+ return RHS->swapAttrs(this);
+
+ // Handle the case when both decls have attrs.
+ if (HasRHSAttr) {
+ std::swap((*DeclAttrs)[this], (*DeclAttrs)[RHS]);
+ return;
+ }
+
+ // Otherwise, LHS has an attr and RHS doesn't.
+ (*DeclAttrs)[RHS] = (*DeclAttrs)[this];
+ (*DeclAttrs).erase(this);
+ this->HasAttrs = false;
+ RHS->HasAttrs = true;
+}
+
+
+void Decl::Destroy(ASTContext &C) {
+ // Free attributes for this decl.
+ if (HasAttrs) {
+ DeclAttrMapTy::iterator it = DeclAttrs->find(this);
+ assert(it != DeclAttrs->end() && "No attrs found but HasAttrs is true!");
+
+ // release attributes.
+ it->second->Destroy(C);
+ invalidateAttrs();
+ HasAttrs = false;
+ }
+
+#if 0
+ // FIXME: Once ownership is fully understood, we can enable this code
+ if (DeclContext *DC = dyn_cast<DeclContext>(this))
+ DC->decls_begin()->Destroy(C);
+
+ // Observe the unrolled recursion. By setting N->NextDeclInContext = 0x0
+ // within the loop, only the Destroy method for the first Decl
+ // will deallocate all of the Decls in a chain.
+
+ Decl* N = getNextDeclInContext();
+
+ while (N) {
+ Decl* Tmp = N->getNextDeclInContext();
+ N->NextDeclInContext = 0;
+ N->Destroy(C);
+ N = Tmp;
+ }
+
+ this->~Decl();
+ C.Deallocate((void *)this);
+#endif
+}
+
+Decl *Decl::castFromDeclContext (const DeclContext *D) {
+ Decl::Kind DK = D->getDeclKind();
+ switch(DK) {
+#define DECL_CONTEXT(Name) \
+ case Decl::Name: \
+ return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(Name)
+#include "clang/AST/DeclNodes.def"
+ default:
+#define DECL_CONTEXT_BASE(Name) \
+ if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
+ return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.def"
+ assert(false && "a decl that inherits DeclContext isn't handled");
+ return 0;
+ }
+}
+
+DeclContext *Decl::castToDeclContext(const Decl *D) {
+ Decl::Kind DK = D->getKind();
+ switch(DK) {
+#define DECL_CONTEXT(Name) \
+ case Decl::Name: \
+ return static_cast<Name##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(Name)
+#include "clang/AST/DeclNodes.def"
+ default:
+#define DECL_CONTEXT_BASE(Name) \
+ if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
+ return static_cast<Name##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.def"
+ assert(false && "a decl that inherits DeclContext isn't handled");
+ return 0;
+ }
+}
+
+CompoundStmt* Decl::getCompoundBody(ASTContext &Context) const {
+ return dyn_cast_or_null<CompoundStmt>(getBody(Context));
+}
+
+SourceLocation Decl::getBodyRBrace(ASTContext &Context) const {
+ Stmt *Body = getBody(Context);
+ if (!Body)
+ return SourceLocation();
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
+ return CS->getRBracLoc();
+ assert(isa<CXXTryStmt>(Body) &&
+ "Body can only be CompoundStmt or CXXTryStmt");
+ return cast<CXXTryStmt>(Body)->getSourceRange().getEnd();
+}
+
+#ifndef NDEBUG
+void Decl::CheckAccessDeclContext() const {
+ assert((Access != AS_none || isa<TranslationUnitDecl>(this) ||
+ !isa<CXXRecordDecl>(getDeclContext())) &&
+ "Access specifier is AS_none inside a record decl");
+}
+
+#endif
+
+//===----------------------------------------------------------------------===//
+// DeclContext Implementation
+//===----------------------------------------------------------------------===//
+
+bool DeclContext::classof(const Decl *D) {
+ switch (D->getKind()) {
+#define DECL_CONTEXT(Name) case Decl::Name:
+#define DECL_CONTEXT_BASE(Name)
+#include "clang/AST/DeclNodes.def"
+ return true;
+ default:
+#define DECL_CONTEXT_BASE(Name) \
+ if (D->getKind() >= Decl::Name##First && \
+ D->getKind() <= Decl::Name##Last) \
+ return true;
+#include "clang/AST/DeclNodes.def"
+ return false;
+ }
+}
+
+DeclContext::~DeclContext() {
+ delete static_cast<StoredDeclsMap*>(LookupPtr);
+}
+
+void DeclContext::DestroyDecls(ASTContext &C) {
+ for (decl_iterator D = decls_begin(C); D != decls_end(C); )
+ (*D++)->Destroy(C);
+}
+
+bool DeclContext::isDependentContext() const {
+ if (isFileContext())
+ return false;
+
+ if (isa<ClassTemplatePartialSpecializationDecl>(this))
+ return true;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ if (Record->getDescribedClassTemplate())
+ return true;
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this))
+ if (Function->getDescribedFunctionTemplate())
+ return true;
+
+ return getParent() && getParent()->isDependentContext();
+}
+
+bool DeclContext::isTransparentContext() const {
+ if (DeclKind == Decl::Enum)
+ return true; // FIXME: Check for C++0x scoped enums
+ else if (DeclKind == Decl::LinkageSpec)
+ return true;
+ else if (DeclKind >= Decl::RecordFirst && DeclKind <= Decl::RecordLast)
+ return cast<RecordDecl>(this)->isAnonymousStructOrUnion();
+ else if (DeclKind == Decl::Namespace)
+ return false; // FIXME: Check for C++0x inline namespaces
+
+ return false;
+}
+
+DeclContext *DeclContext::getPrimaryContext() {
+ switch (DeclKind) {
+ case Decl::TranslationUnit:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ // There is only one DeclContext for these entities.
+ return this;
+
+ case Decl::Namespace:
+ // The original namespace is our primary context.
+ return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
+
+ case Decl::ObjCMethod:
+ return this;
+
+ case Decl::ObjCInterface:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCCategory:
+ // FIXME: Can Objective-C interfaces be forward-declared?
+ return this;
+
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ return this;
+
+ default:
+ if (DeclKind >= Decl::TagFirst && DeclKind <= Decl::TagLast) {
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ if (const TagType *TagT =cast<TagDecl>(this)->TypeForDecl->getAsTagType())
+ if (TagT->isBeingDefined() ||
+ (TagT->getDecl() && TagT->getDecl()->isDefinition()))
+ return TagT->getDecl();
+ return this;
+ }
+
+ assert(DeclKind >= Decl::FunctionFirst && DeclKind <= Decl::FunctionLast &&
+ "Unknown DeclContext kind");
+ return this;
+ }
+}
+
+DeclContext *DeclContext::getNextContext() {
+ switch (DeclKind) {
+ case Decl::Namespace:
+ // Return the next namespace
+ return static_cast<NamespaceDecl*>(this)->getNextNamespace();
+
+ default:
+ return 0;
+ }
+}
+
+/// \brief Load the declarations within this lexical storage from an
+/// external source.
+void
+DeclContext::LoadLexicalDeclsFromExternalStorage(ASTContext &Context) const {
+ ExternalASTSource *Source = Context.getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ llvm::SmallVector<uint32_t, 64> Decls;
+ if (Source->ReadDeclsLexicallyInContext(const_cast<DeclContext *>(this),
+ Decls))
+ return;
+
+ // There is no longer any lexical storage in this context
+ ExternalLexicalStorage = false;
+
+ if (Decls.empty())
+ return;
+
+ // Resolve all of the declaration IDs into declarations, building up
+ // a chain of declarations via the Decl::NextDeclInContext field.
+ Decl *FirstNewDecl = 0;
+ Decl *PrevDecl = 0;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ Decl *D = Source->GetDecl(Decls[I]);
+ if (PrevDecl)
+ PrevDecl->NextDeclInContext = D;
+ else
+ FirstNewDecl = D;
+
+ PrevDecl = D;
+ }
+
+ // Splice the newly-read declarations into the beginning of the list
+ // of declarations.
+ PrevDecl->NextDeclInContext = FirstDecl;
+ FirstDecl = FirstNewDecl;
+ if (!LastDecl)
+ LastDecl = PrevDecl;
+}
+
+void
+DeclContext::LoadVisibleDeclsFromExternalStorage(ASTContext &Context) const {
+ DeclContext *This = const_cast<DeclContext *>(this);
+ ExternalASTSource *Source = Context.getExternalSource();
+ assert(hasExternalVisibleStorage() && Source && "No external storage?");
+
+ llvm::SmallVector<VisibleDeclaration, 64> Decls;
+ if (Source->ReadDeclsVisibleInContext(This, Decls))
+ return;
+
+ // There is no longer any visible storage in this context
+ ExternalVisibleStorage = false;
+
+ // Load the declaration IDs for all of the names visible in this
+ // context.
+ assert(!LookupPtr && "Have a lookup map before de-serialization?");
+ StoredDeclsMap *Map = new StoredDeclsMap;
+ LookupPtr = Map;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ (*Map)[Decls[I].Name].setFromDeclIDs(Decls[I].Declarations);
+ }
+}
+
+DeclContext::decl_iterator DeclContext::decls_begin(ASTContext &Context) const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage(Context);
+
+ // FIXME: Check whether we need to load some declarations from
+ // external storage.
+ return decl_iterator(FirstDecl);
+}
+
+DeclContext::decl_iterator DeclContext::decls_end(ASTContext &Context) const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage(Context);
+
+ return decl_iterator();
+}
+
+bool DeclContext::decls_empty(ASTContext &Context) const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage(Context);
+
+ return !FirstDecl;
+}
+
+void DeclContext::addDecl(ASTContext &Context, Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "Decl inserted into wrong lexical context");
+ assert(!D->getNextDeclInContext() && D != LastDecl &&
+ "Decl already inserted into a DeclContext");
+
+ if (FirstDecl) {
+ LastDecl->NextDeclInContext = D;
+ LastDecl = D;
+ } else {
+ FirstDecl = LastDecl = D;
+ }
+
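+  // Note that the declaration is chained into this lexical context, but is
+  // made visible in its semantic DeclContext, which may be different.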
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->makeDeclVisibleInContext(Context, ND);
+}
+
+/// buildLookup - Build the lookup data structure with all of the
+/// declarations in DCtx (and any other contexts linked to it or
+/// transparent contexts nested within it).
+void DeclContext::buildLookup(ASTContext &Context, DeclContext *DCtx) {
+ for (; DCtx; DCtx = DCtx->getNextContext()) {
+ for (decl_iterator D = DCtx->decls_begin(Context),
+ DEnd = DCtx->decls_end(Context);
+ D != DEnd; ++D) {
+ // Insert this declaration into the lookup structure
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+ makeDeclVisibleInContextImpl(Context, ND);
+
+ // If this declaration is itself a transparent declaration context,
+ // add its members (recursively).
+ if (DeclContext *InnerCtx = dyn_cast<DeclContext>(*D))
+ if (InnerCtx->isTransparentContext())
+ buildLookup(Context, InnerCtx->getPrimaryContext());
+ }
+ }
+}
+
+DeclContext::lookup_result
+DeclContext::lookup(ASTContext &Context, DeclarationName Name) {
+ DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this)
+ return PrimaryContext->lookup(Context, Name);
+
+ if (hasExternalVisibleStorage())
+ LoadVisibleDeclsFromExternalStorage(Context);
+
+ /// If there is no lookup data structure, build one now by walking
+ /// all of the linked DeclContexts (in declaration order!) and
+ /// inserting their values.
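+  /// For a namespace, the linked contexts are the re-opened namespace
+  /// definitions chained together through getNextContext().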
+ if (!LookupPtr) {
+ buildLookup(Context, this);
+
+ if (!LookupPtr)
+ return lookup_result(0, 0);
+ }
+
+ StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(LookupPtr);
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos == Map->end())
+ return lookup_result(0, 0);
+ return Pos->second.getLookupResult(Context);
+}
+
+DeclContext::lookup_const_result
+DeclContext::lookup(ASTContext &Context, DeclarationName Name) const {
+ return const_cast<DeclContext*>(this)->lookup(Context, Name);
+}
+
+DeclContext *DeclContext::getLookupContext() {
+ DeclContext *Ctx = this;
+ // Skip through transparent contexts.
+ while (Ctx->isTransparentContext())
+ Ctx = Ctx->getParent();
+ return Ctx;
+}
+
+DeclContext *DeclContext::getEnclosingNamespaceContext() {
+ DeclContext *Ctx = this;
+ // Skip through non-namespace, non-translation-unit contexts.
+ while (!Ctx->isFileContext() || Ctx->isTransparentContext())
+ Ctx = Ctx->getParent();
+ return Ctx->getPrimaryContext();
+}
+
+void DeclContext::makeDeclVisibleInContext(ASTContext &Context, NamedDecl *D) {
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return;
+
+ DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this) {
+ PrimaryContext->makeDeclVisibleInContext(Context, D);
+ return;
+ }
+
+ // If we already have a lookup data structure, perform the insertion
+ // into it. Otherwise, be lazy and don't build that structure until
+ // someone asks for it.
+ if (LookupPtr)
+ makeDeclVisibleInContextImpl(Context, D);
+
+ // If we are a transparent context, insert into our parent context,
+ // too. This operation is recursive.
+ if (isTransparentContext())
+ getParent()->makeDeclVisibleInContext(Context, D);
+}
+
+void DeclContext::makeDeclVisibleInContextImpl(ASTContext &Context,
+ NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return;
+
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return;
+
+ if (!LookupPtr)
+ LookupPtr = new StoredDeclsMap;
+
+ // Insert this declaration into the map.
+ StoredDeclsMap &Map = *static_cast<StoredDeclsMap*>(LookupPtr);
+ StoredDeclsList &DeclNameEntries = Map[D->getDeclName()];
+ if (DeclNameEntries.isNull()) {
+ DeclNameEntries.setOnlyValue(D);
+ return;
+ }
+
+ // If it is possible that this is a redeclaration, check to see if there is
+ // already a decl for which declarationReplaces returns true. If there is
+ // one, just replace it and return.
+ if (DeclNameEntries.HandleRedeclaration(Context, D))
+ return;
+
+ // Put this declaration into the appropriate slot.
+ DeclNameEntries.AddSubsequentDecl(D);
+}
+
+/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
+/// this context.
+DeclContext::udir_iterator_range
+DeclContext::getUsingDirectives(ASTContext &Context) const {
+ lookup_const_result Result = lookup(Context, UsingDirectiveDecl::getName());
+ return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.first),
+ reinterpret_cast<udir_iterator>(Result.second));
+}
+
+void StoredDeclsList::materializeDecls(ASTContext &Context) {
+ if (isNull())
+ return;
+
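+  // The low two bits of Data encode the DataKind; the remaining bits hold
+  // either a Decl pointer, an external declaration ID, or a pointer to a
+  // vector of declarations/IDs, as handled case-by-case below.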
+ switch ((DataKind)(Data & 0x03)) {
+ case DK_Decl:
+ case DK_Decl_Vector:
+ break;
+
+ case DK_DeclID: {
+ // Resolve this declaration ID to an actual declaration by
+ // querying the external AST source.
+ unsigned DeclID = Data >> 2;
+
+ ExternalASTSource *Source = Context.getExternalSource();
+ assert(Source && "No external AST source available!");
+
+ Data = reinterpret_cast<uintptr_t>(Source->GetDecl(DeclID));
+ break;
+ }
+
+ case DK_ID_Vector: {
+ // We have a vector of declaration IDs. Resolve all of them to
+ // actual declarations.
+ VectorTy &Vector = *getAsVector();
+ ExternalASTSource *Source = Context.getExternalSource();
+ assert(Source && "No external AST source available!");
+
+ for (unsigned I = 0, N = Vector.size(); I != N; ++I)
+ Vector[I] = reinterpret_cast<uintptr_t>(Source->GetDecl(Vector[I]));
+
+ Data = (Data & ~0x03) | DK_Decl_Vector;
+ break;
+ }
+ }
+}
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
new file mode 100644
index 0000000..19f8958
--- /dev/null
+++ b/lib/AST/DeclCXX.cpp
@@ -0,0 +1,462 @@
+//===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id)
+ : RecordDecl(K, TK, DC, L, Id),
+ UserDeclaredConstructor(false), UserDeclaredCopyConstructor(false),
+ UserDeclaredCopyAssignment(false), UserDeclaredDestructor(false),
+ Aggregate(true), PlainOldData(true), Polymorphic(false), Abstract(false),
+ HasTrivialConstructor(true), HasTrivialDestructor(true),
+ Bases(0), NumBases(0), Conversions(DC, DeclarationName()),
+ TemplateOrInstantiation() { }
+
+CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ CXXRecordDecl* PrevDecl,
+ bool DelayTypeCreation) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, L, Id);
+ if (!DelayTypeCreation)
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+CXXRecordDecl::~CXXRecordDecl() {
+ delete [] Bases;
+}
+
+void
+CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
+ unsigned NumBases) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...]
+ // no base classes [...].
+ Aggregate = false;
+
+ if (this->Bases)
+ delete [] this->Bases;
+
+ // FIXME: allocate using the ASTContext
+ this->Bases = new CXXBaseSpecifier[NumBases];
+ this->NumBases = NumBases;
+ for (unsigned i = 0; i < NumBases; ++i)
+ this->Bases[i] = *Bases[i];
+}
+
+bool CXXRecordDecl::hasConstCopyConstructor(ASTContext &Context) const {
+ QualType ClassType
+ = Context.getTypeDeclType(const_cast<CXXRecordDecl*>(this));
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ unsigned TypeQuals;
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = this->lookup(Context, ConstructorName);
+ Con != ConEnd; ++Con) {
+ if (cast<CXXConstructorDecl>(*Con)->isCopyConstructor(Context, TypeQuals) &&
+ (TypeQuals & QualType::Const) != 0)
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::hasConstCopyAssignment(ASTContext &Context) const {
+ QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
+ const_cast<CXXRecordDecl*>(this)));
+ DeclarationName OpName =Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = this->lookup(Context, OpName);
+ Op != OpEnd; ++Op) {
+ // C++ [class.copy]p9:
+ // A user-declared copy assignment operator is a non-static non-template
+ // member function of class X with exactly one parameter of type X, X&,
+ // const X&, volatile X& or const volatile X&.
+ const CXXMethodDecl* Method = cast<CXXMethodDecl>(*Op);
+ if (Method->isStatic())
+ continue;
+ // TODO: Skip templates? Or is this implicitly done due to parameter types?
+ const FunctionProtoType *FnType =
+ Method->getType()->getAsFunctionProtoType();
+ assert(FnType && "Overloaded operator has no prototype.");
+ // Don't assert on this; an invalid decl might have been left in the AST.
+ if (FnType->getNumArgs() != 1 || FnType->isVariadic())
+ continue;
+ bool AcceptsConst = true;
+ QualType ArgType = FnType->getArgType(0);
+ if (const LValueReferenceType *Ref = ArgType->getAsLValueReferenceType()) {
+ ArgType = Ref->getPointeeType();
+ // Is it a non-const lvalue reference?
+ if (!ArgType.isConstQualified())
+ AcceptsConst = false;
+ }
+ if (Context.getCanonicalType(ArgType).getUnqualifiedType() != ClassType)
+ continue;
+
+ // We have a single argument of type cv X or cv X&, i.e. we've found the
+ // copy assignment operator. Return whether it accepts const arguments.
+ return AcceptsConst;
+ }
+ assert(isInvalidDecl() &&
+ "No copy assignment operator declared in valid code.");
+ return false;
+}
+
+void
+CXXRecordDecl::addedConstructor(ASTContext &Context,
+ CXXConstructorDecl *ConDecl) {
+ if (!ConDecl->isImplicit()) {
+ // Note that we have a user-declared constructor.
+ UserDeclaredConstructor = true;
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with no
+ // user-declared constructors (12.1) [...].
+ Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ PlainOldData = false;
+
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if it is an implicitly-declared default
+ // constructor.
+ HasTrivialConstructor = false;
+
+ // Note when we have a user-declared copy constructor, which will
+ // suppress the implicit declaration of a copy constructor.
+ if (ConDecl->isCopyConstructor(Context))
+ UserDeclaredCopyConstructor = true;
+ }
+}
+
+void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context,
+ CXXMethodDecl *OpDecl) {
+ // We're interested specifically in copy assignment operators.
+ // Unlike addedConstructor, this method is not called for implicit
+ // declarations.
+ const FunctionProtoType *FnType = OpDecl->getType()->getAsFunctionProtoType();
+ assert(FnType && "Overloaded operator has no proto function type.");
+ assert(FnType->getNumArgs() == 1 && !FnType->isVariadic());
+ QualType ArgType = FnType->getArgType(0);
+ if (const LValueReferenceType *Ref = ArgType->getAsLValueReferenceType())
+ ArgType = Ref->getPointeeType();
+
+ ArgType = ArgType.getUnqualifiedType();
+ QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
+ const_cast<CXXRecordDecl*>(this)));
+
+ if (ClassType != Context.getCanonicalType(ArgType))
+ return;
+
+ // This is a copy assignment operator.
+  // Suppress the implicit declaration of a copy assignment operator.
+ UserDeclaredCopyAssignment = true;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that [...] has no user-defined copy
+ // assignment operator [...].
+ PlainOldData = false;
+}
+
+void CXXRecordDecl::addConversionFunction(ASTContext &Context,
+ CXXConversionDecl *ConvDecl) {
+ Conversions.addOverload(ConvDecl);
+}
+
+const CXXDestructorDecl *
+CXXRecordDecl::getDestructor(ASTContext &Context) {
+ QualType ClassType = Context.getTypeDeclType(this);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(ClassType);
+
+ DeclContext::lookup_iterator I, E;
+ llvm::tie(I, E) = lookup(Context, Name);
+ assert(I != E && "Did not find a destructor!");
+
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(*I);
+ assert(++I == E && "Found more than one destructor!");
+
+ return Dtor;
+}
+
+CXXMethodDecl *
+CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation L, DeclarationName N,
+ QualType T, bool isStatic, bool isInline) {
+ return new (C) CXXMethodDecl(CXXMethod, RD, L, N, T, isStatic, isInline);
+}
+
+
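+// Lazily-created side table mapping a CXXMethodDecl to the methods it
+// overrides (see addOverriddenMethod below).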
+typedef llvm::DenseMap<const CXXMethodDecl*,
+ std::vector<const CXXMethodDecl *> *>
+ OverriddenMethodsMapTy;
+
+static OverriddenMethodsMapTy *OverriddenMethods = 0;
+
+void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
+ // FIXME: The CXXMethodDecl dtor needs to remove and free the entry.
+
+ if (!OverriddenMethods)
+ OverriddenMethods = new OverriddenMethodsMapTy();
+
+ std::vector<const CXXMethodDecl *> *&Methods = (*OverriddenMethods)[this];
+ if (!Methods)
+ Methods = new std::vector<const CXXMethodDecl *>;
+
+ Methods->push_back(MD);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
+ if (!OverriddenMethods)
+ return 0;
+
+ OverriddenMethodsMapTy::iterator it = OverriddenMethods->find(this);
+ if (it == OverriddenMethods->end())
+ return 0;
+ return &(*it->second)[0];
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
+ if (!OverriddenMethods)
+ return 0;
+
+ OverriddenMethodsMapTy::iterator it = OverriddenMethods->find(this);
+ if (it == OverriddenMethods->end())
+ return 0;
+
+ return &(*it->second)[it->second->size()];
+}
+
+QualType CXXMethodDecl::getThisType(ASTContext &C) const {
+ // C++ 9.3.2p1: The type of this in a member function of a class X is X*.
+ // If the member function is declared const, the type of this is const X*,
+ // if the member function is declared volatile, the type of this is
+ // volatile X*, and if the member function is declared const volatile,
+ // the type of this is const volatile X*.
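+  //
+  // For example (illustrative only): given
+  //   struct X { void f() const; };
+  // the type of 'this' inside X::f is 'const X *'.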
+
+ assert(isInstance() && "No 'this' for static methods!");
+ QualType ClassTy = C.getTagDeclType(const_cast<CXXRecordDecl*>(getParent()));
+ ClassTy = ClassTy.getWithAdditionalQualifiers(getTypeQualifiers());
+ return C.getPointerType(ClassTy).withConst();
+}
+
+CXXBaseOrMemberInitializer::
+CXXBaseOrMemberInitializer(QualType BaseType, Expr **Args, unsigned NumArgs)
+ : Args(0), NumArgs(0) {
+ BaseOrMember = reinterpret_cast<uintptr_t>(BaseType.getTypePtr());
+ assert((BaseOrMember & 0x01) == 0 && "Invalid base class type pointer");
+ BaseOrMember |= 0x01;
+
+ if (NumArgs > 0) {
+ this->NumArgs = NumArgs;
+ this->Args = new Expr*[NumArgs];
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ this->Args[Idx] = Args[Idx];
+ }
+}
+
+CXXBaseOrMemberInitializer::
+CXXBaseOrMemberInitializer(FieldDecl *Member, Expr **Args, unsigned NumArgs)
+ : Args(0), NumArgs(0) {
+ BaseOrMember = reinterpret_cast<uintptr_t>(Member);
+ assert((BaseOrMember & 0x01) == 0 && "Invalid member pointer");
+
+ if (NumArgs > 0) {
+ this->NumArgs = NumArgs;
+ this->Args = new Expr*[NumArgs];
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ this->Args[Idx] = Args[Idx];
+ }
+}
+
+CXXBaseOrMemberInitializer::~CXXBaseOrMemberInitializer() {
+ delete [] Args;
+}
+
+CXXConstructorDecl *
+CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation L, DeclarationName N,
+ QualType T, bool isExplicit,
+ bool isInline, bool isImplicitlyDeclared) {
+ assert(N.getNameKind() == DeclarationName::CXXConstructorName &&
+ "Name must refer to a constructor");
+ return new (C) CXXConstructorDecl(RD, L, N, T, isExplicit, isInline,
+ isImplicitlyDeclared);
+}
+
+bool CXXConstructorDecl::isDefaultConstructor() const {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class
+ // X that can be called without an argument.
+ return (getNumParams() == 0) ||
+ (getNumParams() > 0 && getParamDecl(0)->getDefaultArg() != 0);
+}
+
+bool
+CXXConstructorDecl::isCopyConstructor(ASTContext &Context,
+ unsigned &TypeQuals) const {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor
+ // if its first parameter is of type X&, const X&, volatile X& or
+ // const volatile X&, and either there are no other parameters
+ // or else all other parameters have default arguments (8.3.6).
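+  //
+  // For example (illustrative only):
+  //   struct X {
+  //     X(const X &);        // copy constructor
+  //     X(X &, int i = 17);  // also a copy constructor
+  //   };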
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && getParamDecl(1)->getDefaultArg() == 0))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ // Do we have a reference type? Rvalue references don't count.
+ const LValueReferenceType *ParamRefType =
+ Param->getType()->getAsLValueReferenceType();
+ if (!ParamRefType)
+ return false;
+
+ // Is it a reference to our class type?
+ QualType PointeeType
+ = Context.getCanonicalType(ParamRefType->getPointeeType());
+ QualType ClassTy
+ = Context.getTagDeclType(const_cast<CXXRecordDecl*>(getParent()));
+ if (PointeeType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ // We have a copy constructor.
+ TypeQuals = PointeeType.getCVRQualifiers();
+ return true;
+}
+
+bool CXXConstructorDecl::isConvertingConstructor() const {
+ // C++ [class.conv.ctor]p1:
+ // A constructor declared without the function-specifier explicit
+ // that can be called with a single parameter specifies a
+ // conversion from the type of its first parameter to the type of
+ // its class. Such a constructor is called a converting
+ // constructor.
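+  //
+  // For example (illustrative only):
+  //   struct X {
+  //     X(int);             // converting constructor
+  //     explicit X(float);  // not a converting constructor
+  //   };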
+ if (isExplicit())
+ return false;
+
+ return (getNumParams() == 0 &&
+ getType()->getAsFunctionProtoType()->isVariadic()) ||
+ (getNumParams() == 1) ||
+ (getNumParams() > 1 && getParamDecl(1)->getDefaultArg() != 0);
+}
+
+CXXDestructorDecl *
+CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation L, DeclarationName N,
+ QualType T, bool isInline,
+ bool isImplicitlyDeclared) {
+ assert(N.getNameKind() == DeclarationName::CXXDestructorName &&
+ "Name must refer to a destructor");
+ return new (C) CXXDestructorDecl(RD, L, N, T, isInline,
+ isImplicitlyDeclared);
+}
+
+CXXConversionDecl *
+CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation L, DeclarationName N,
+ QualType T, bool isInline, bool isExplicit) {
+ assert(N.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ "Name must refer to a conversion function");
+ return new (C) CXXConversionDecl(RD, L, N, T, isInline, isExplicit);
+}
+
+OverloadedFunctionDecl *
+OverloadedFunctionDecl::Create(ASTContext &C, DeclContext *DC,
+ DeclarationName N) {
+ return new (C) OverloadedFunctionDecl(DC, N);
+}
+
+LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ LanguageIDs Lang, bool Braces) {
+ return new (C) LinkageSpecDecl(DC, L, Lang, Braces);
+}
+
+UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ SourceLocation NamespaceLoc,
+ SourceRange QualifierRange,
+ NestedNameSpecifier *Qualifier,
+ SourceLocation IdentLoc,
+ NamespaceDecl *Used,
+ DeclContext *CommonAncestor) {
+ return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierRange,
+ Qualifier, IdentLoc, Used, CommonAncestor);
+}
+
+NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ SourceRange QualifierRange,
+ NestedNameSpecifier *Qualifier,
+ SourceLocation IdentLoc,
+ NamedDecl *Namespace) {
+ return new (C) NamespaceAliasDecl(DC, L, AliasLoc, Alias, QualifierRange,
+ Qualifier, IdentLoc, Namespace);
+}
+
+StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, Expr *AssertExpr,
+ StringLiteral *Message) {
+ return new (C) StaticAssertDecl(DC, L, AssertExpr, Message);
+}
+
+void StaticAssertDecl::Destroy(ASTContext& C) {
+ AssertExpr->Destroy(C);
+ Message->Destroy(C);
+ this->~StaticAssertDecl();
+ C.Deallocate((void *)this);
+}
+
+StaticAssertDecl::~StaticAssertDecl() {
+}
+
+static const char *getAccessName(AccessSpecifier AS) {
+ switch (AS) {
+ default:
+ case AS_none:
+ assert("Invalid access specifier!");
+ return 0;
+ case AS_public:
+ return "public";
+ case AS_private:
+ return "private";
+ case AS_protected:
+ return "protected";
+ }
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
+
+
diff --git a/lib/AST/DeclGroup.cpp b/lib/AST/DeclGroup.cpp
new file mode 100644
index 0000000..5bdc881
--- /dev/null
+++ b/lib/AST/DeclGroup.cpp
@@ -0,0 +1,37 @@
+//===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclGroup and DeclGroupRef classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/Allocator.h"
+using namespace clang;
+
+DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+ assert(NumDecls > 1 && "Invalid DeclGroup");
+ unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
+ void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
+ new (Mem) DeclGroup(NumDecls, Decls);
+ return static_cast<DeclGroup*>(Mem);
+}
+
+DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
+ assert(numdecls > 0);
+ assert(decls);
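+  // The Decl pointers live in the tail-allocated storage that Create()
+  // reserved immediately after this object, hence the 'this+1'.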
+ memcpy(this+1, decls, numdecls * sizeof(*decls));
+}
+
+void DeclGroup::Destroy(ASTContext& C) {
+ this->~DeclGroup();
+ C.Deallocate((void*) this);
+}
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
new file mode 100644
index 0000000..f4bb895
--- /dev/null
+++ b/lib/AST/DeclObjC.cpp
@@ -0,0 +1,693 @@
+//===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// ObjCListBase
+//===----------------------------------------------------------------------===//
+
+void ObjCListBase::Destroy(ASTContext &Ctx) {
+ Ctx.Deallocate(List);
+ NumElts = 0;
+ List = 0;
+}
+
+void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
+ assert(List == 0 && "Elements already set!");
+ if (Elts == 0) return; // Setting to an empty list is a noop.
+
+ List = new (Ctx) void*[Elts];
+ NumElts = Elts;
+ memcpy(List, InList, sizeof(void*)*Elts);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+// Get the local instance method declared in this interface.
+ObjCMethodDecl *
+ObjCContainerDecl::getInstanceMethod(ASTContext &Context, Selector Sel) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Context, Sel);
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod())
+ return MD;
+ }
+ return 0;
+}
+
+// Get the local class method declared in this interface.
+ObjCMethodDecl *
+ObjCContainerDecl::getClassMethod(ASTContext &Context, Selector Sel) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Context, Sel);
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isClassMethod())
+ return MD;
+ }
+ return 0;
+}
+
+/// FindPropertyDeclaration - Finds declaration of the property given its name
+/// in 'PropertyId' and returns it. Returns 0 if not found.
+/// FIXME: Convert to DeclContext lookup...
+///
+ObjCPropertyDecl *
+ObjCContainerDecl::FindPropertyDeclaration(ASTContext &Context,
+ IdentifierInfo *PropertyId) const {
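+  // Search this container's own properties first; depending on the kind of
+  // container, then search referenced protocols, categories, and the
+  // superclass.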
+ for (prop_iterator I = prop_begin(Context), E = prop_end(Context);
+ I != E; ++I)
+ if ((*I)->getIdentifier() == PropertyId)
+ return *I;
+
+ const ObjCProtocolDecl *PID = dyn_cast<ObjCProtocolDecl>(this);
+ if (PID) {
+ for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(),
+ E = PID->protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(Context,
+ PropertyId))
+ return P;
+ }
+
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(this)) {
+ // Look through categories.
+ for (ObjCCategoryDecl *Category = OID->getCategoryList();
+ Category; Category = Category->getNextClassCategory()) {
+ if (ObjCPropertyDecl *P = Category->FindPropertyDeclaration(Context,
+ PropertyId))
+ return P;
+ }
+ // Look through protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = OID->protocol_begin(),
+ E = OID->protocol_end(); I != E; ++I) {
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(Context,
+ PropertyId))
+ return P;
+ }
+ if (OID->getSuperClass())
+ return OID->getSuperClass()->FindPropertyDeclaration(Context,
+ PropertyId);
+ } else if (const ObjCCategoryDecl *OCD = dyn_cast<ObjCCategoryDecl>(this)) {
+ // Look through protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = OCD->protocol_begin(),
+ E = OCD->protocol_end(); I != E; ++I) {
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(Context,
+ PropertyId))
+ return P;
+ }
+ }
+ return 0;
+}
+
+ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(
+ ASTContext &Context, IdentifierInfo *ID, ObjCInterfaceDecl *&clsDeclared) {
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ for (ivar_iterator I = ClassDecl->ivar_begin(), E = ClassDecl->ivar_end();
+ I != E; ++I) {
+ if ((*I)->getIdentifier() == ID) {
+ clsDeclared = ClassDecl;
+ return *I;
+ }
+ }
+ // look into properties.
+ for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(Context),
+ E = ClassDecl->prop_end(Context); I != E; ++I) {
+ ObjCPropertyDecl *PDecl = (*I);
+ if (ObjCIvarDecl *IV = PDecl->getPropertyIvarDecl())
+ if (IV->getIdentifier() == ID) {
+ clsDeclared = ClassDecl;
+ return IV;
+ }
+ }
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupInheritedClass - This method returns the ObjCInterfaceDecl * of the
+/// superclass whose name is passed as the argument. If it is not one of the
+/// superclasses, it returns NULL.
+ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
+ const IdentifierInfo*ICName) {
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ if (ClassDecl->getIdentifier() == ICName)
+ return ClassDecl;
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupInstanceMethod - This method returns an instance method by looking in
+/// the class, its categories, and its super classes (using a linear search).
+ObjCMethodDecl *ObjCInterfaceDecl::lookupInstanceMethod(ASTContext &Context,
+ Selector Sel) {
+ ObjCInterfaceDecl* ClassDecl = this;
+ ObjCMethodDecl *MethodDecl = 0;
+
+ while (ClassDecl != NULL) {
+ if ((MethodDecl = ClassDecl->getInstanceMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupInstanceMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - now look through categories.
+ ObjCCategoryDecl *CatDecl = ClassDecl->getCategoryList();
+ while (CatDecl) {
+ if ((MethodDecl = CatDecl->getInstanceMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupInstanceMethod(Context, Sel)))
+ return MethodDecl;
+ CatDecl = CatDecl->getNextClassCategory();
+ }
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+// lookupClassMethod - This method returns a class method by looking in the
+// class, its categories, and its super classes (using a linear search).
+ObjCMethodDecl *ObjCInterfaceDecl::lookupClassMethod(ASTContext &Context,
+ Selector Sel) {
+ ObjCInterfaceDecl* ClassDecl = this;
+ ObjCMethodDecl *MethodDecl = 0;
+
+ while (ClassDecl != NULL) {
+ if ((MethodDecl = ClassDecl->getClassMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupClassMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - now look through categories.
+ ObjCCategoryDecl *CatDecl = ClassDecl->getCategoryList();
+ while (CatDecl) {
+ if ((MethodDecl = CatDecl->getClassMethod(Context, Sel)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupClassMethod(Context, Sel)))
+ return MethodDecl;
+ CatDecl = CatDecl->getNextClassCategory();
+ }
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// ObjCMethodDecl
+//===----------------------------------------------------------------------===//
+
+ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
+ SourceLocation beginLoc,
+ SourceLocation endLoc,
+ Selector SelInfo, QualType T,
+ DeclContext *contextDecl,
+ bool isInstance,
+ bool isVariadic,
+ bool isSynthesized,
+ ImplementationControl impControl) {
+ return new (C) ObjCMethodDecl(beginLoc, endLoc,
+ SelInfo, T, contextDecl,
+ isInstance,
+ isVariadic, isSynthesized, impControl);
+}
+
+void ObjCMethodDecl::Destroy(ASTContext &C) {
+ if (Body) Body->Destroy(C);
+ if (SelfDecl) SelfDecl->Destroy(C);
+
+ for (param_iterator I=param_begin(), E=param_end(); I!=E; ++I)
+ if (*I) (*I)->Destroy(C);
+
+ ParamInfo.Destroy(C);
+
+ Decl::Destroy(C);
+}
+
+void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ QualType selfTy;
+ if (isInstanceMethod()) {
+    // There may be no interface context due to an error in the declaration
+    // of the interface (which has already been reported). Recover gracefully.
+ if (OID) {
+ selfTy = Context.getObjCInterfaceType(OID);
+ selfTy = Context.getPointerType(selfTy);
+ } else {
+ selfTy = Context.getObjCIdType();
+ }
+ } else // we have a factory method.
+ selfTy = Context.getObjCClassType();
+
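+  // For example (illustrative only): in an instance method of
+  // '@interface Foo', 'self' has type 'Foo *'; in a class method it has
+  // type 'Class'. '_cmd' holds the selector and has type 'SEL'.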
+ setSelfDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("self"), selfTy));
+
+ setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("_cmd"),
+ Context.getObjCSelType()));
+}
+
+
+
+/// getSynthesizedMethodSize - Compute size of synthesized method name
+/// as done by the rewriter.
+///
+unsigned ObjCMethodDecl::getSynthesizedMethodSize() const {
+  // The synthesized method name is a concatenation of -/+[class-name selector].
+ // Get length of this name.
+ unsigned length = 3; // _I_ or _C_
+ length += getClassInterface()->getNameAsString().size()+1; // extra for _
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(getDeclContext()))
+ length += CID->getNameAsString().size()+1;
+ length += getSelector().getAsString().size(); // selector name
+ return length;
+}
+
+ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplementationDecl *IMD =
+ dyn_cast<ObjCImplementationDecl>(getDeclContext()))
+ return IMD->getClassInterface();
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(getDeclContext()))
+ return CID->getClassInterface();
+ assert(false && "unknown method context");
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ IdentifierInfo *Id,
+ SourceLocation ClassLoc,
+ bool ForwardDecl, bool isInternal){
+ return new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc, ForwardDecl,
+ isInternal);
+}
+
+ObjCInterfaceDecl::
+ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
+ SourceLocation CLoc, bool FD, bool isInternal)
+ : ObjCContainerDecl(ObjCInterface, DC, atLoc, Id),
+ TypeForDecl(0), SuperClass(0),
+ CategoryList(0), ForwardDecl(FD), InternalInterface(isInternal),
+ ClassLoc(CLoc) {
+}
+
+void ObjCInterfaceDecl::Destroy(ASTContext &C) {
+ for (ivar_iterator I = ivar_begin(), E = ivar_end(); I != E; ++I)
+ if (*I) (*I)->Destroy(C);
+
+ IVars.Destroy(C);
+ // FIXME: CategoryList?
+
+ // FIXME: Because there is no clear ownership
+ // role between ObjCInterfaceDecls and the ObjCPropertyDecls that they
+ // reference, we destroy ObjCPropertyDecls in ~TranslationUnit.
+ Decl::Destroy(C);
+}
+
+
+/// FindCategoryDeclaration - Finds the category declaration in the list of
+/// categories for this class and returns it. The name of the category is
+/// passed in 'CategoryId'. Returns 0 if the category is not found.
+///
+ObjCCategoryDecl *
+ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (Category->getIdentifier() == CategoryId)
+ return Category;
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCIvarDecl
+//===----------------------------------------------------------------------===//
+
+ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, AccessControl ac, Expr *BW) {
+ return new (C) ObjCIvarDecl(DC, L, Id, T, ac, BW);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// ObjCAtDefsFieldDecl
+//===----------------------------------------------------------------------===//
+
+ObjCAtDefsFieldDecl
+*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, Expr *BW) {
+ return new (C) ObjCAtDefsFieldDecl(DC, L, Id, T, BW);
+}
+
+void ObjCAtDefsFieldDecl::Destroy(ASTContext& C) {
+ this->~ObjCAtDefsFieldDecl();
+ C.Deallocate((void *)this);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCProtocolDecl
+//===----------------------------------------------------------------------===//
+
+ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id) {
+ return new (C) ObjCProtocolDecl(DC, L, Id);
+}
+
+void ObjCProtocolDecl::Destroy(ASTContext &C) {
+ ReferencedProtocols.Destroy(C);
+ ObjCContainerDecl::Destroy(C);
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
+ ObjCProtocolDecl *PDecl = this;
+
+ if (Name == getIdentifier())
+ return PDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((PDecl = (*I)->lookupProtocolNamed(Name)))
+ return PDecl;
+
+ return NULL;
+}
+
+// lookupInstanceMethod - Look up an instance method in the protocol and the
+// protocols it inherits.
+ObjCMethodDecl *ObjCProtocolDecl::lookupInstanceMethod(ASTContext &Context,
+ Selector Sel) {
+ ObjCMethodDecl *MethodDecl = NULL;
+
+ if ((MethodDecl = getInstanceMethod(Context, Sel)))
+ return MethodDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupInstanceMethod(Context, Sel)))
+ return MethodDecl;
+ return NULL;
+}
+
+// lookupClassMethod - Look up a class method in the protocol and the
+// protocols it inherits.
+ObjCMethodDecl *ObjCProtocolDecl::lookupClassMethod(ASTContext &Context,
+ Selector Sel) {
+ ObjCMethodDecl *MethodDecl = NULL;
+
+ if ((MethodDecl = getClassMethod(Context, Sel)))
+ return MethodDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupClassMethod(Context, Sel)))
+ return MethodDecl;
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCClassDecl
+//===----------------------------------------------------------------------===//
+
+ObjCClassDecl::ObjCClassDecl(DeclContext *DC, SourceLocation L,
+ ObjCInterfaceDecl *const *Elts, unsigned nElts,
+ ASTContext &C)
+ : Decl(ObjCClass, DC, L) {
+ ForwardDecls.set(Elts, nElts, C);
+}
+
+
+ObjCClassDecl *ObjCClassDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ ObjCInterfaceDecl *const *Elts,
+ unsigned nElts) {
+ return new (C) ObjCClassDecl(DC, L, Elts, nElts, C);
+}
+
+void ObjCClassDecl::Destroy(ASTContext &C) {
+
+ // FIXME: There is no clear ownership policy now for referenced
+ // ObjCInterfaceDecls. Some of them can be forward declarations that
+ // are never later defined (in which case the ObjCClassDecl owns them)
+  // or the ObjCInterfaceDecl later becomes a real definition. Ideally
+ // we should have separate objects for forward declarations and definitions,
+ // obviating this problem. Because of this situation, referenced
+ // ObjCInterfaceDecls are destroyed in ~TranslationUnit.
+
+ ForwardDecls.Destroy(C);
+ Decl::Destroy(C);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCForwardProtocolDecl
+//===----------------------------------------------------------------------===//
+
+ObjCForwardProtocolDecl::
+ObjCForwardProtocolDecl(DeclContext *DC, SourceLocation L,
+ ObjCProtocolDecl *const *Elts, unsigned nElts,
+ ASTContext &C)
+: Decl(ObjCForwardProtocol, DC, L) {
+ ReferencedProtocols.set(Elts, nElts, C);
+}
+
+
+ObjCForwardProtocolDecl *
+ObjCForwardProtocolDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ ObjCProtocolDecl *const *Elts,
+ unsigned NumElts) {
+ return new (C) ObjCForwardProtocolDecl(DC, L, Elts, NumElts, C);
+}
+
+void ObjCForwardProtocolDecl::Destroy(ASTContext &C) {
+ ReferencedProtocols.Destroy(C);
+ Decl::Destroy(C);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryDecl
+//===----------------------------------------------------------------------===//
+
+ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id) {
+ return new (C) ObjCCategoryDecl(DC, L, Id);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryImplDecl
+//===----------------------------------------------------------------------===//
+
+ObjCCategoryImplDecl *
+ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,IdentifierInfo *Id,
+ ObjCInterfaceDecl *ClassInterface) {
+ return new (C) ObjCCategoryImplDecl(DC, L, Id, ClassInterface);
+}
+
+
+void ObjCImplDecl::addPropertyImplementation(ASTContext &Context,
+ ObjCPropertyImplDecl *property) {
+ // FIXME: The context should be correct before we get here.
+ property->setLexicalDeclContext(this);
+ addDecl(Context, property);
+}
+
+/// FindPropertyImplIvarDecl - This method looks up the ivar in the list of
+/// properties implemented in this category @implementation block and returns
+/// the implemented property that uses it.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplIvarDecl(ASTContext &Context, IdentifierInfo *ivarId) const {
+ for (propimpl_iterator i = propimpl_begin(Context), e = propimpl_end(Context);
+ i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyIvarDecl() &&
+ PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
+ return PID;
+ }
+ return 0;
+}
+
+/// FindPropertyImplDecl - This method looks up a previous ObjCPropertyImplDecl
+/// added to the list of those properties @synthesized/@dynamic in this
+/// category @implementation block.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplDecl(ASTContext &Context, IdentifierInfo *Id) const {
+ for (propimpl_iterator i = propimpl_begin(Context), e = propimpl_end(Context);
+ i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl()->getIdentifier() == Id)
+ return PID;
+ }
+ return 0;
+}
+
+// getInstanceMethod - This method returns an instance method by looking in
+// the class implementation. Unlike interfaces, we don't look outside the
+// implementation.
+ObjCMethodDecl *ObjCImplDecl::getInstanceMethod(ASTContext &Context,
+ Selector Sel) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Context, Sel);
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod())
+ return MD;
+ }
+ return 0;
+}
+
+// getClassMethod - This method returns a class method by looking in
+// the class implementation. Unlike interfaces, we don't look outside the
+// implementation.
+ObjCMethodDecl *ObjCImplDecl::getClassMethod(ASTContext &Context,
+ Selector Sel) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Context, Sel);
+ Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isClassMethod())
+ return MD;
+ }
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCImplementationDecl
+//===----------------------------------------------------------------------===//
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ ObjCInterfaceDecl *ClassInterface,
+ ObjCInterfaceDecl *SuperDecl) {
+ return new (C) ObjCImplementationDecl(DC, L, ClassInterface, SuperDecl);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCompatibleAliasDecl
+//===----------------------------------------------------------------------===//
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl* AliasedClass) {
+ return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyDecl
+//===----------------------------------------------------------------------===//
+
+ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ QualType T,
+ PropertyControl propControl) {
+ return new (C) ObjCPropertyDecl(DC, L, Id, T);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyImplDecl
+//===----------------------------------------------------------------------===//
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivar) {
+ return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar);
+}
+
+
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
new file mode 100644
index 0000000..f29da8b
--- /dev/null
+++ b/lib/AST/DeclPrinter.cpp
@@ -0,0 +1,722 @@
+//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl::dump method, which pretty prints the
+// AST back out to C/Objective-C/C++/Objective-C++ code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN DeclPrinter : public DeclVisitor<DeclPrinter> {
+ llvm::raw_ostream &Out;
+ ASTContext &Context;
+ PrintingPolicy Policy;
+ unsigned Indentation;
+
+ llvm::raw_ostream& Indent();
+ void ProcessDeclGroup(llvm::SmallVectorImpl<Decl*>& Decls);
+
+ public:
+ DeclPrinter(llvm::raw_ostream &Out, ASTContext &Context,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0)
+ : Out(Out), Context(Context), Policy(Policy), Indentation(Indentation) { }
+
+ void VisitDeclContext(DeclContext *DC, bool Indent = true);
+
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitOriginalParmVarDecl(OriginalParmVarDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitOverloadedFunctionDecl(OverloadedFunctionDecl *D);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitTemplateDecl(TemplateDecl *D);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCClassDecl(ObjCClassDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ };
+}
+
+void Decl::print(llvm::raw_ostream &Out, ASTContext &Context,
+ unsigned Indentation) {
+ print(Out, Context, Context.PrintingPolicy, Indentation);
+}
+
+void Decl::print(llvm::raw_ostream &Out, ASTContext &Context,
+ const PrintingPolicy &Policy, unsigned Indentation) {
+ DeclPrinter Printer(Out, Context, Policy, Indentation);
+ Printer.Visit(this);
+}
+
+static QualType GetBaseType(QualType T) {
+ // FIXME: This should be on the Type class!
+ QualType BaseType = T;
+ while (!BaseType->isSpecifierType()) {
+ if (isa<TypedefType>(BaseType))
+ break;
+ else if (const PointerType* PTy = BaseType->getAsPointerType())
+ BaseType = PTy->getPointeeType();
+ else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
+ BaseType = ATy->getElementType();
+ else if (const FunctionType* FTy = BaseType->getAsFunctionType())
+ BaseType = FTy->getResultType();
+ else
+ assert(0 && "Unknown declarator!");
+ }
+ return BaseType;
+}
+
+static QualType getDeclType(Decl* D) {
+ if (TypedefDecl* TDD = dyn_cast<TypedefDecl>(D))
+ return TDD->getUnderlyingType();
+ if (ValueDecl* VD = dyn_cast<ValueDecl>(D))
+ return VD->getType();
+ return QualType();
+}
+
+void Decl::printGroup(Decl** Begin, unsigned NumDecls,
+ llvm::raw_ostream &Out, ASTContext &Context,
+ const PrintingPolicy &Policy,
+ unsigned Indentation) {
+ if (NumDecls == 1) {
+ (*Begin)->print(Out, Context, Policy, Indentation);
+ return;
+ }
+
+ Decl** End = Begin + NumDecls;
+ TagDecl* TD = dyn_cast<TagDecl>(*Begin);
+ if (TD)
+ ++Begin;
+
+ PrintingPolicy SubPolicy(Policy);
+ if (TD && TD->isDefinition()) {
+ TD->print(Out, Context, Policy, Indentation);
+ Out << " ";
+ SubPolicy.SuppressTag = true;
+ }
+
+ bool isFirst = true;
+ for ( ; Begin != End; ++Begin) {
+ if (isFirst) {
+ SubPolicy.SuppressSpecifiers = false;
+ isFirst = false;
+ } else {
+ Out << ", ";
+ SubPolicy.SuppressSpecifiers = true;
+ }
+
+ (*Begin)->print(Out, Context, SubPolicy, Indentation);
+ }
+}
+
+void Decl::dump(ASTContext &Context) {
+ print(llvm::errs(), Context);
+}
+
+llvm::raw_ostream& DeclPrinter::Indent() {
+ for (unsigned i = 0; i < Indentation; ++i)
+ Out << " ";
+ return Out;
+}
+
+void DeclPrinter::ProcessDeclGroup(llvm::SmallVectorImpl<Decl*>& Decls) {
+ this->Indent();
+ Decl::printGroup(Decls.data(), Decls.size(), Out, Context,
+ Policy, Indentation);
+ Out << ";\n";
+ Decls.clear();
+}
+
+//----------------------------------------------------------------------------
+// Common C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
+ if (Indent)
+ Indentation += Policy.Indentation;
+
+ llvm::SmallVector<Decl*, 2> Decls;
+ for (DeclContext::decl_iterator D = DC->decls_begin(Context),
+ DEnd = DC->decls_end(Context);
+ D != DEnd; ++D) {
+ if (!Policy.Dump) {
+ // Skip over implicit declarations in pretty-printing mode.
+ if (D->isImplicit()) continue;
+ // FIXME: Ugly hack so we don't pretty-print the builtin declaration
+ // of __builtin_va_list. There should be some other way to check that.
+ if (isa<NamedDecl>(*D) && cast<NamedDecl>(*D)->getNameAsString() ==
+ "__builtin_va_list")
+ continue;
+ }
+
+ // The next bit of code handles declarations like "struct {int x;} a,b"; we're
+ // forced to merge the declarations because there's no other way to
+ // refer to the struct in question. This limited merging is safe without
+ // a bunch of other checks because it only merges declarations directly
+ // referring to the tag, not typedefs.
+ //
+ // Check whether the current declaration should be grouped with a previous
+ // unnamed struct.
+ QualType CurDeclType = getDeclType(*D);
+ if (!Decls.empty() && !CurDeclType.isNull()) {
+ QualType BaseType = GetBaseType(CurDeclType);
+ if (!BaseType.isNull() && isa<TagType>(BaseType) &&
+ cast<TagType>(BaseType)->getDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
+ }
+
+ // If we have a merged group waiting to be handled, handle it now.
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ // If the current declaration is an unnamed tag type, save it
+ // so we can merge it with the subsequent declaration(s) using it.
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
+ Decls.push_back(*D);
+ continue;
+ }
+ this->Indent();
+ Visit(*D);
+
+ // FIXME: Need to be able to tell the DeclPrinter when a declaration
+ // should not be followed by a terminator at all.
+ const char *Terminator = 0;
+ if (isa<FunctionDecl>(*D) &&
+ cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
+ Terminator = 0;
+ else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
+ Terminator = 0;
+ else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
+ isa<ObjCImplementationDecl>(*D) ||
+ isa<ObjCInterfaceDecl>(*D) ||
+ isa<ObjCProtocolDecl>(*D) ||
+ isa<ObjCCategoryImplDecl>(*D) ||
+ isa<ObjCCategoryDecl>(*D))
+ Terminator = 0;
+ else if (isa<EnumConstantDecl>(*D)) {
+ DeclContext::decl_iterator Next = D;
+ ++Next;
+ if (Next != DEnd)
+ Terminator = ",";
+ } else
+ Terminator = ";";
+
+ if (Terminator)
+ Out << Terminator;
+ Out << "\n";
+ }
+
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ if (Indent)
+ Indentation -= Policy.Indentation;
+}
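
A worked example of the merging described in the comments above (not part of the patch): for the input

    struct { int x; } a, b;

the unnamed TagDecl is held back in Decls, the two VarDecls whose base type refers to it are appended to the same group, and ProcessDeclGroup prints everything back as the single declaration "struct { int x; } a, b;" rather than three separate ones, since splitting them would leave no way to name the struct.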
+
+void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ VisitDeclContext(D, false);
+}
+
+void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
+ std::string S = D->getNameAsString();
+ D->getUnderlyingType().getAsStringInternal(S, Policy);
+ if (!Policy.SuppressSpecifiers)
+ Out << "typedef ";
+ Out << S;
+}
+
+void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
+ Out << "enum " << D->getNameAsString() << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
+void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
+ Out << D->getKindName();
+ if (D->getIdentifier()) {
+ Out << " ";
+ Out << D->getNameAsString();
+ }
+
+ if (D->isDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ Out << D->getNameAsString();
+ if (Expr *Init = D->getInitExpr()) {
+ Out << " = ";
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+}
+
+void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
+ if (!Policy.SuppressSpecifiers) {
+ switch (D->getStorageClass()) {
+ case FunctionDecl::None: break;
+ case FunctionDecl::Extern: Out << "extern "; break;
+ case FunctionDecl::Static: Out << "static "; break;
+ case FunctionDecl::PrivateExtern: Out << "__private_extern__ "; break;
+ }
+
+ if (D->isInline()) Out << "inline ";
+ if (D->isVirtualAsWritten()) Out << "virtual ";
+ }
+
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressSpecifiers = false;
+ std::string Proto = D->getNameAsString();
+ if (isa<FunctionType>(D->getType().getTypePtr())) {
+ const FunctionType *AFT = D->getType()->getAsFunctionType();
+
+ const FunctionProtoType *FT = 0;
+ if (D->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ Proto += "(";
+ if (FT) {
+ llvm::raw_string_ostream POut(Proto);
+ DeclPrinter ParamPrinter(POut, Context, SubPolicy, Indentation);
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ }
+
+ if (FT->isVariadic()) {
+ if (D->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ } else if (D->isThisDeclarationADefinition() && !D->hasPrototype()) {
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i)
+ Proto += ", ";
+ Proto += D->getParamDecl(i)->getNameAsString();
+ }
+ }
+
+ Proto += ")";
+ AFT->getResultType().getAsStringInternal(Proto, Policy);
+ } else {
+ D->getType().getAsStringInternal(Proto, Policy);
+ }
+
+ Out << Proto;
+
+ if (D->isPure())
+ Out << " = 0";
+ else if (D->isDeleted())
+ Out << " = delete";
+ else if (D->isThisDeclarationADefinition()) {
+ if (!D->hasPrototype() && D->getNumParams()) {
+ // This is a K&R function definition, so we need to print the
+ // parameters.
+ Out << '\n';
+ DeclPrinter ParamPrinter(Out, Context, SubPolicy, Indentation);
+ Indentation += Policy.Indentation;
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ Indent();
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ Out << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ } else
+ Out << ' ';
+
+ D->getBody(Context)->printPretty(Out, Context, 0, SubPolicy, Indentation);
+ Out << '\n';
+ }
+}
+
+void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isMutable())
+ Out << "mutable ";
+
+ std::string Name = D->getNameAsString();
+ D->getType().getAsStringInternal(Name, Policy);
+ Out << Name;
+
+ if (D->isBitField()) {
+ Out << " : ";
+ D->getBitWidth()->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+}
+
+void DeclPrinter::VisitVarDecl(VarDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->getStorageClass() != VarDecl::None)
+ Out << VarDecl::getStorageClassSpecifierString(D->getStorageClass()) << " ";
+
+ if (!Policy.SuppressSpecifiers && D->isThreadSpecified())
+ Out << "__thread ";
+
+ std::string Name = D->getNameAsString();
+ QualType T = D->getType();
+ if (OriginalParmVarDecl *Parm = dyn_cast<OriginalParmVarDecl>(D))
+ T = Parm->getOriginalType();
+ T.getAsStringInternal(Name, Policy);
+ Out << Name;
+ if (D->getInit()) {
+ if (D->hasCXXDirectInitializer())
+ Out << "(";
+ else
+ Out << " = ";
+ D->getInit()->printPretty(Out, Context, 0, Policy, Indentation);
+ if (D->hasCXXDirectInitializer())
+ Out << ")";
+ }
+}
+
+void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+}
+
+void DeclPrinter::VisitOriginalParmVarDecl(OriginalParmVarDecl *D) {
+ VisitVarDecl(D);
+}
+
+void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ Out << "__asm (";
+ D->getAsmString()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ")";
+}
+
+//----------------------------------------------------------------------------
+// C++ declarations
+//----------------------------------------------------------------------------
+void DeclPrinter::VisitOverloadedFunctionDecl(OverloadedFunctionDecl *D) {
+ assert(false &&
+ "OverloadedFunctionDecls aren't really decls and are never printed");
+}
+
+void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
+ Out << "namespace " << D->getNameAsString() << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
+void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ Out << "using namespace ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getNominatedNamespace()->getNameAsString();
+}
+
+void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ Out << "namespace " << D->getNameAsString() << " = ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getAliasedNamespace()->getNameAsString();
+}
+
+void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ Out << D->getKindName();
+ if (D->getIdentifier()) {
+ Out << " ";
+ Out << D->getNameAsString();
+ }
+
+ if (D->isDefinition()) {
+ // Print the base classes
+ if (D->getNumBases()) {
+ Out << " : ";
+ for(CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
+ BaseEnd = D->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (Base != D->bases_begin())
+ Out << ", ";
+
+ if (Base->isVirtual())
+ Out << "virtual ";
+
+ switch(Base->getAccessSpecifierAsWritten()) {
+ case AS_none: break;
+ case AS_public: Out << "public "; break;
+ case AS_protected: Out << "protected "; break;
+ case AS_private: Out << "private "; break;
+ }
+
+ Out << Base->getType().getAsString(Policy);
+ }
+ }
+
+ // Print the class definition
+ // FIXME: Doesn't print access specifiers, e.g., "public:"
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
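
Roughly what the visitor above produces for a small class (illustrative only, not part of the patch; access specifiers inside the body are still missing per the FIXME):

    // input:
    class Derived : virtual public Base { int x; };
    // printed, approximately:
    //   class Derived : virtual public Base {
    //     int x;
    //   }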
+
+void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ const char *l;
+ if (D->getLanguage() == LinkageSpecDecl::lang_c)
+ l = "C";
+ else {
+ assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
+ "unknown language in linkage specification");
+ l = "C++";
+ }
+
+ Out << "extern \"" << l << "\" ";
+ if (D->hasBraces()) {
+ Out << "{\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ } else
+ Visit(*D->decls_begin(Context));
+}
+
+void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
+ // TODO: Write template parameters.
+ Out << "template <...> ";
+ Visit(D->getTemplatedDecl());
+}
+
+//----------------------------------------------------------------------------
+// Objective-C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitObjCClassDecl(ObjCClassDecl *D) {
+ Out << "@class ";
+ for (ObjCClassDecl::iterator I = D->begin(), E = D->end();
+ I != E; ++I) {
+ if (I != D->begin()) Out << ", ";
+ Out << (*I)->getNameAsString();
+ }
+}
+
+void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
+ if (OMD->isInstanceMethod())
+ Out << "- ";
+ else
+ Out << "+ ";
+ if (!OMD->getResultType().isNull())
+ Out << '(' << OMD->getResultType().getAsString(Policy) << ")";
+
+ std::string name = OMD->getSelector().getAsString();
+ std::string::size_type pos, lastPos = 0;
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ // FIXME: selector is missing here!
+ pos = name.find_first_of(":", lastPos);
+ Out << " " << name.substr(lastPos, pos - lastPos);
+ Out << ":(" << (*PI)->getType().getAsString(Policy) << ")"
+ << (*PI)->getNameAsString();
+ lastPos = pos + 1;
+ }
+
+ if (OMD->param_begin() == OMD->param_end())
+ Out << " " << name;
+
+ if (OMD->isVariadic())
+ Out << ", ...";
+
+ if (OMD->getBody()) {
+ Out << ' ';
+ OMD->getBody()->printPretty(Out, Context, 0, Policy);
+ Out << '\n';
+ }
+}
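
For reference (not part of the patch), the selector-splitting loop above prints a two-argument instance method such as

    - (void)setWidth:(int)w height:(int)h;

as "- (void) setWidth:(int)w height:(int)h", pairing each selector piece with its typed parameter; a method with no parameters just prints the bare selector after the return type.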
+
+void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (SID)
+ Out << "@implementation " << I << " : " << SID->getNameAsString();
+ else
+ Out << "@implementation " << I;
+ Out << "\n";
+ VisitDeclContext(OID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (SID)
+ Out << "@interface " << I << " : " << SID->getNameAsString();
+ else
+ Out << "@interface " << I;
+
+ // Protocols?
+ const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
+ if (!Protocols.empty()) {
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ Out << (I == Protocols.begin() ? '<' : ',') << (*I)->getNameAsString();
+ }
+
+ if (!Protocols.empty())
+ Out << "> ";
+
+ if (OID->ivar_size() > 0) {
+ Out << "{\n";
+ Indentation += Policy.Indentation;
+ for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
+ E = OID->ivar_end(); I != E; ++I) {
+ Indent() << (*I)->getType().getAsString(Policy)
+ << ' ' << (*I)->getNameAsString() << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+
+ VisitDeclContext(OID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
+ Out << "@protocol ";
+ for (ObjCForwardProtocolDecl::protocol_iterator I = D->protocol_begin(),
+ E = D->protocol_end();
+ I != E; ++I) {
+ if (I != D->protocol_begin()) Out << ", ";
+ Out << (*I)->getNameAsString();
+ }
+}
+
+void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
+ Out << "@protocol " << PID->getNameAsString() << '\n';
+ VisitDeclContext(PID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
+ Out << "@implementation "
+ << PID->getClassInterface()->getNameAsString()
+ << '(' << PID->getNameAsString() << ")\n";
+
+ VisitDeclContext(PID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
+ Out << "@interface "
+ << PID->getClassInterface()->getNameAsString()
+ << '(' << PID->getNameAsString() << ")\n";
+ VisitDeclContext(PID, false);
+ Out << "@end";
+
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
+ Out << "@compatibility_alias " << AID->getNameAsString()
+ << ' ' << AID->getClassInterface()->getNameAsString() << ";\n";
+}
+
+/// VisitObjCPropertyDecl - print a property declaration.
+///
+void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
+ if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ Out << "@required\n";
+ else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ Out << "@optional\n";
+
+ Out << "@property";
+ if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ bool first = true;
+ Out << " (";
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) {
+ Out << (first ? ' ' : ',') << "readonly";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ Out << (first ? ' ' : ',') << "getter = "
+ << PDecl->getGetterName().getAsString();
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ Out << (first ? ' ' : ',') << "setter = "
+ << PDecl->getSetterName().getAsString();
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ Out << (first ? ' ' : ',') << "assign";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readwrite) {
+ Out << (first ? ' ' : ',') << "readwrite";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ Out << (first ? ' ' : ',') << "retain";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ Out << (first ? ' ' : ',') << "copy";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ Out << (first ? ' ' : ',') << "nonatomic";
+ first = false;
+ }
+ Out << " )";
+ }
+ Out << ' ' << PDecl->getType().getAsString(Policy)
+ << ' ' << PDecl->getNameAsString();
+}
+
+void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ Out << "@synthesize ";
+ else
+ Out << "@dynamic ";
+ Out << PID->getPropertyDecl()->getNameAsString();
+ if (PID->getPropertyIvarDecl())
+ Out << "=" << PID->getPropertyIvarDecl()->getNameAsString();
+}
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
new file mode 100644
index 0000000..f38ee82
--- /dev/null
+++ b/lib/AST/DeclTemplate.cpp
@@ -0,0 +1,324 @@
+//===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes for templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TemplateParameterList Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ Decl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc)
+ : TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
+ NumParams(NumParams) {
+ for (unsigned Idx = 0; Idx < NumParams; ++Idx)
+ begin()[Idx] = Params[Idx];
+}
+
+TemplateParameterList *
+TemplateParameterList::Create(ASTContext &C, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc, Decl **Params,
+ unsigned NumParams, SourceLocation RAngleLoc) {
+ unsigned Size = sizeof(TemplateParameterList) + sizeof(Decl *) * NumParams;
+ unsigned Align = llvm::AlignOf<TemplateParameterList>::Alignment;
+ void *Mem = C.Allocate(Size, Align);
+ return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
+ NumParams, RAngleLoc);
+}
+
+unsigned TemplateParameterList::getMinRequiredArguments() const {
+ unsigned NumRequiredArgs = size();
+ iterator Param = const_cast<TemplateParameterList *>(this)->end(),
+ ParamBegin = const_cast<TemplateParameterList *>(this)->begin();
+ while (Param != ParamBegin) {
+ --Param;
+ if (!(isa<TemplateTypeParmDecl>(*Param) &&
+ cast<TemplateTypeParmDecl>(*Param)->hasDefaultArgument()) &&
+ !(isa<NonTypeTemplateParmDecl>(*Param) &&
+ cast<NonTypeTemplateParmDecl>(*Param)->hasDefaultArgument()) &&
+ !(isa<TemplateTemplateParmDecl>(*Param) &&
+ cast<TemplateTemplateParmDecl>(*Param)->hasDefaultArgument()))
+ break;
+
+ --NumRequiredArgs;
+ }
+
+ return NumRequiredArgs;
+}
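
A quick illustration (not part of the patch): only a contiguous run of trailing defaulted parameters is discounted, so

    template<typename T, typename U = int, int N = 4> class S;   // size() == 3
    template<typename T, typename U>                  class P;   // size() == 2

give getMinRequiredArguments() of 1 and 2 respectively.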
+
+//===----------------------------------------------------------------------===//
+// TemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateDecl::~TemplateDecl() {
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl,
+ ClassTemplateDecl *PrevDecl) {
+ Common *CommonPtr;
+ if (PrevDecl)
+ CommonPtr = PrevDecl->CommonPtr;
+ else
+ CommonPtr = new (C) Common;
+
+ return new (C) ClassTemplateDecl(DC, L, Name, Params, Decl, PrevDecl,
+ CommonPtr);
+}
+
+ClassTemplateDecl::~ClassTemplateDecl() {
+ assert(CommonPtr == 0 && "ClassTemplateDecl must be explicitly destroyed");
+}
+
+void ClassTemplateDecl::Destroy(ASTContext& C) {
+ if (!PreviousDeclaration) {
+ CommonPtr->~Common();
+ C.Deallocate((void*)CommonPtr);
+ }
+ CommonPtr = 0;
+
+ this->~ClassTemplateDecl();
+ C.Deallocate((void*)this);
+}
+
+QualType ClassTemplateDecl::getInjectedClassNameType(ASTContext &Context) {
+ if (!CommonPtr->InjectedClassNameType.isNull())
+ return CommonPtr->InjectedClassNameType;
+
+ // FIXME: n2800 14.6.1p1 should say how the template arguments
+ // corresponding to template parameter packs should be pack
+ // expansions. We already say that in 14.6.2.1p2, so it would be
+ // better to fix that redundancy.
+
+ TemplateParameterList *Params = getTemplateParameters();
+
+ llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
+ llvm::SmallVector<TemplateArgument, 16> CanonTemplateArgs;
+ TemplateArgs.reserve(Params->size());
+ CanonTemplateArgs.reserve(Params->size());
+
+ for (TemplateParameterList::iterator
+ Param = Params->begin(), ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ if (isa<TemplateTypeParmDecl>(*Param)) {
+ QualType ParamType = Context.getTypeDeclType(cast<TypeDecl>(*Param));
+ TemplateArgs.push_back(TemplateArgument((*Param)->getLocation(),
+ ParamType));
+ CanonTemplateArgs.push_back(
+ TemplateArgument((*Param)->getLocation(),
+ Context.getCanonicalType(ParamType)));
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ // FIXME: Build canonical expression, too!
+ Expr *E = new (Context) DeclRefExpr(NTTP, NTTP->getType(),
+ NTTP->getLocation(),
+ NTTP->getType()->isDependentType(),
+ /*Value-dependent=*/true);
+ TemplateArgs.push_back(TemplateArgument(E));
+ CanonTemplateArgs.push_back(TemplateArgument(E));
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
+ TemplateArgs.push_back(TemplateArgument(TTP->getLocation(), TTP));
+ CanonTemplateArgs.push_back(TemplateArgument(TTP->getLocation(),
+ Context.getCanonicalDecl(TTP)));
+ }
+ }
+
+ // FIXME: I should really move the "build-the-canonical-type" logic
+ // into ASTContext::getTemplateSpecializationType.
+ TemplateName Name = TemplateName(this);
+ QualType CanonType = Context.getTemplateSpecializationType(
+ Context.getCanonicalTemplateName(Name),
+ &CanonTemplateArgs[0],
+ CanonTemplateArgs.size());
+
+ CommonPtr->InjectedClassNameType
+ = Context.getTemplateSpecializationType(Name,
+ &TemplateArgs[0],
+ TemplateArgs.size(),
+ CanonType);
+ return CommonPtr->InjectedClassNameType;
+}
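
Informally, what this computes (not part of the patch): inside the definition of

    template<typename T, int N> class Array { /* "Array" here means Array<T, N> */ };

the template's own name denotes the dependent specialization Array<T, N>; the code above rebuilds exactly that specialization type from the parameter list, computes a parallel canonical form, and caches the result in CommonPtr.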
+
+//===----------------------------------------------------------------------===//
+// TemplateTypeParm Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id, bool Typename) {
+ QualType Type = C.getTemplateTypeParmType(D, P, Id);
+ return new (C) TemplateTypeParmDecl(DC, L, Id, Typename, Type);
+}
+
+//===----------------------------------------------------------------------===//
+// NonTypeTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ SourceLocation TypeSpecStartLoc) {
+ return new (C) NonTypeTemplateParmDecl(DC, L, D, P, Id, T,
+ TypeSpecStartLoc);
+}
+
+SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
+ return DefaultArgument? DefaultArgument->getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ TemplateParameterList *Params) {
+ return new (C) TemplateTemplateParmDecl(DC, L, D, P, Id, Params);
+}
+
+SourceLocation TemplateTemplateParmDecl::getDefaultArgumentLoc() const {
+ return DefaultArgument? DefaultArgument->getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgument Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgument::TemplateArgument(Expr *E) : Kind(Expression) {
+ TypeOrValue = reinterpret_cast<uintptr_t>(E);
+ StartLoc = E->getSourceRange().getBegin();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentList Implementation
+//===----------------------------------------------------------------------===//
+TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
+ TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ bool CopyArgs)
+ : NumArguments(NumTemplateArgs) {
+ if (!CopyArgs) {
+ Arguments.setPointer(TemplateArgs);
+ Arguments.setInt(1);
+ return;
+ }
+
+ unsigned Size = sizeof(TemplateArgument) * NumTemplateArgs;
+ unsigned Align = llvm::AlignOf<TemplateArgument>::Alignment;
+ void *Mem = Context.Allocate(Size, Align);
+ Arguments.setPointer((TemplateArgument *)Mem);
+ Arguments.setInt(0);
+
+ TemplateArgument *Args = (TemplateArgument *)Mem;
+ for (unsigned I = 0; I != NumTemplateArgs; ++I)
+ new (Args + I) TemplateArgument(TemplateArgs[I]);
+}
+
+TemplateArgumentList::~TemplateArgumentList() {
+ // FIXME: Deallocate template arguments
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ClassTemplateSpecializationDecl::
+ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK,
+ DeclContext *DC, SourceLocation L,
+ ClassTemplateDecl *SpecializedTemplate,
+ TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs)
+ : CXXRecordDecl(DK,
+ SpecializedTemplate->getTemplatedDecl()->getTagKind(),
+ DC, L,
+ // FIXME: Should we use DeclarationName for the name of
+ // class template specializations?
+ SpecializedTemplate->getIdentifier()),
+ SpecializedTemplate(SpecializedTemplate),
+ TemplateArgs(Context, TemplateArgs, NumTemplateArgs, /*CopyArgs=*/true),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context,
+ DeclContext *DC, SourceLocation L,
+ ClassTemplateDecl *SpecializedTemplate,
+ TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ ClassTemplateSpecializationDecl *PrevDecl) {
+ ClassTemplateSpecializationDecl *Result
+ = new (Context)ClassTemplateSpecializationDecl(Context,
+ ClassTemplateSpecialization,
+ DC, L,
+ SpecializedTemplate,
+ TemplateArgs,
+ NumTemplateArgs);
+ Context.getTypeDeclType(Result, PrevDecl);
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplatePartialSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::
+Create(ASTContext &Context, DeclContext *DC, SourceLocation L,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ TemplateArgument *TemplateArgs, unsigned NumTemplateArgs,
+ ClassTemplatePartialSpecializationDecl *PrevDecl) {
+ ClassTemplatePartialSpecializationDecl *Result
+ = new (Context)ClassTemplatePartialSpecializationDecl(Context,
+ DC, L, Params,
+ SpecializedTemplate,
+ TemplateArgs,
+ NumTemplateArgs);
+ Result->setSpecializationKind(TSK_ExplicitSpecialization);
+ Context.getTypeDeclType(Result, PrevDecl);
+ return Result;
+}
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
new file mode 100644
index 0000000..a17abde
--- /dev/null
+++ b/lib/AST/DeclarationName.cpp
@@ -0,0 +1,355 @@
+//===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeclarationName and DeclarationNameTable
+// classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+using namespace clang;
+
+namespace clang {
+/// CXXSpecialName - Records the type associated with one of the
+/// "special" kinds of declaration names in C++, e.g., constructors,
+/// destructors, and conversion functions.
+class CXXSpecialName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ /// Type - The type associated with this declaration name.
+ QualType Type;
+
+ /// FETokenInfo - Extra information associated with this declaration
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(ExtraKindOrNumArgs);
+ ID.AddPointer(Type.getAsOpaquePtr());
+ }
+};
+
+/// CXXOperatorIdName - Contains extra information for the name of an
+/// overloaded operator in C++, such as "operator+".
+class CXXOperatorIdName : public DeclarationNameExtra {
+public:
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+};
+
+bool operator<(DeclarationName LHS, DeclarationName RHS) {
+ if (IdentifierInfo *LhsId = LHS.getAsIdentifierInfo())
+ if (IdentifierInfo *RhsId = RHS.getAsIdentifierInfo())
+ return strcmp(LhsId->getName(), RhsId->getName()) < 0;
+
+ return LHS.getAsOpaqueInteger() < RHS.getAsOpaqueInteger();
+}
+
+} // end namespace clang
+
+DeclarationName::DeclarationName(Selector Sel) {
+ if (!Sel.getAsOpaquePtr()) {
+ Ptr = StoredObjCZeroArgSelector;
+ return;
+ }
+
+ switch (Sel.getNumArgs()) {
+ case 0:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCZeroArgSelector;
+ break;
+
+ case 1:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCOneArgSelector;
+ break;
+
+ default:
+ Ptr = Sel.InfoPtr & ~Selector::ArgFlags;
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned MultiKeywordSelector");
+ Ptr |= StoredDeclarationNameExtra;
+ break;
+ }
+}
+
+DeclarationName::NameKind DeclarationName::getNameKind() const {
+ switch (getStoredNameKind()) {
+ case StoredIdentifier: return Identifier;
+ case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
+ case StoredObjCOneArgSelector: return ObjCOneArgSelector;
+
+ case StoredDeclarationNameExtra:
+ switch (getExtra()->ExtraKindOrNumArgs) {
+ case DeclarationNameExtra::CXXConstructor:
+ return CXXConstructorName;
+
+ case DeclarationNameExtra::CXXDestructor:
+ return CXXDestructorName;
+
+ case DeclarationNameExtra::CXXConversionFunction:
+ return CXXConversionFunctionName;
+
+ case DeclarationNameExtra::CXXUsingDirective:
+ return CXXUsingDirective;
+
+ default:
+ // Check if we have one of the CXXOperator* enumeration values.
+ if (getExtra()->ExtraKindOrNumArgs <
+ DeclarationNameExtra::CXXUsingDirective)
+ return CXXOperatorName;
+
+ return ObjCMultiArgSelector;
+ }
+ break;
+ }
+
+ // Can't actually get here.
+ assert(0 && "This should be unreachable!");
+ return Identifier;
+}
+
+std::string DeclarationName::getAsString() const {
+ switch (getNameKind()) {
+ case Identifier:
+ if (const IdentifierInfo *II = getAsIdentifierInfo())
+ return II->getName();
+ return "";
+
+ case ObjCZeroArgSelector:
+ case ObjCOneArgSelector:
+ case ObjCMultiArgSelector:
+ return getObjCSelector().getAsString();
+
+ case CXXConstructorName: {
+ QualType ClassType = getCXXNameType();
+ if (const RecordType *ClassRec = ClassType->getAsRecordType())
+ return ClassRec->getDecl()->getNameAsString();
+ return ClassType.getAsString();
+ }
+
+ case CXXDestructorName: {
+ std::string Result = "~";
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAsRecordType())
+ Result += Rec->getDecl()->getNameAsString();
+ else
+ Result += Type.getAsString();
+ return Result;
+ }
+
+ case CXXOperatorName: {
+ static const char *OperatorNames[NUM_OVERLOADED_OPERATORS] = {
+ 0,
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+ const char *OpName = OperatorNames[getCXXOverloadedOperator()];
+ assert(OpName && "not an overloaded operator");
+
+ std::string Result = "operator";
+ if (OpName[0] >= 'a' && OpName[0] <= 'z')
+ Result += ' ';
+ Result += OpName;
+ return Result;
+ }
+
+ case CXXConversionFunctionName: {
+ std::string Result = "operator ";
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAsRecordType())
+ Result += Rec->getDecl()->getNameAsString();
+ else
+ Result += Type.getAsString();
+ return Result;
+ }
+ case CXXUsingDirective:
+ return "<using-directive>";
+ }
+
+ assert(false && "Unexpected declaration name kind");
+ return "";
+}
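
Representative results of getAsString() (not part of the patch), assuming a class named Widget:

    // constructor name               -> "Widget"
    // destructor name                -> "~Widget"
    // operator+                      -> "operator+"
    // operator new (word spelling)   -> "operator new"   (a space is inserted before alphabetic spellings)
    // conversion to int              -> "operator int"
    // using-directive special name   -> "<using-directive>"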
+
+QualType DeclarationName::getCXXNameType() const {
+ if (CXXSpecialName *CXXName = getAsCXXSpecialName())
+ return CXXName->Type;
+ else
+ return QualType();
+}
+
+OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
+ if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
+ unsigned value
+ = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
+ return static_cast<OverloadedOperatorKind>(value);
+ } else {
+ return OO_None;
+ }
+}
+
+Selector DeclarationName::getObjCSelector() const {
+ switch (getNameKind()) {
+ case ObjCZeroArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 0);
+
+ case ObjCOneArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 1);
+
+ case ObjCMultiArgSelector:
+ return Selector(reinterpret_cast<MultiKeywordSelector *>(Ptr & ~PtrMask));
+
+ default:
+ break;
+ }
+
+ return Selector();
+}
+
+void *DeclarationName::getFETokenInfoAsVoid() const {
+ switch (getNameKind()) {
+ case Identifier:
+ return getAsIdentifierInfo()->getFETokenInfo<void>();
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ return getAsCXXSpecialName()->FETokenInfo;
+
+ case CXXOperatorName:
+ return getAsCXXOperatorIdName()->FETokenInfo;
+
+ default:
+ assert(false && "Declaration name has no FETokenInfo");
+ }
+ return 0;
+}
+
+void DeclarationName::setFETokenInfo(void *T) {
+ switch (getNameKind()) {
+ case Identifier:
+ getAsIdentifierInfo()->setFETokenInfo(T);
+ break;
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ getAsCXXSpecialName()->FETokenInfo = T;
+ break;
+
+ case CXXOperatorName:
+ getAsCXXOperatorIdName()->FETokenInfo = T;
+ break;
+
+ default:
+ assert(false && "Declaration name has no FETokenInfo");
+ }
+}
+
+DeclarationName DeclarationName::getUsingDirectiveName() {
+ // Single instance of DeclarationNameExtra for using-directive
+ static DeclarationNameExtra UDirExtra =
+ { DeclarationNameExtra::CXXUsingDirective };
+
+ uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
+ Ptr |= StoredDeclarationNameExtra;
+
+ return DeclarationName(Ptr);
+}
+
+DeclarationNameTable::DeclarationNameTable() {
+ CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
+
+ // Initialize the overloaded operator names.
+ CXXOperatorNames = new CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
+ for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
+ CXXOperatorNames[Op].ExtraKindOrNumArgs
+ = Op + DeclarationNameExtra::CXXConversionFunction;
+ CXXOperatorNames[Op].FETokenInfo = 0;
+ }
+}
+
+DeclarationNameTable::~DeclarationNameTable() {
+ llvm::FoldingSet<CXXSpecialName> *set =
+ static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+ llvm::FoldingSetIterator<CXXSpecialName> I = set->begin(), E = set->end();
+
+ while (I != E) {
+ CXXSpecialName *n = &*I++;
+ delete n;
+ }
+
+ delete set;
+ delete [] CXXOperatorNames;
+}
+
+DeclarationName
+DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
+ QualType Ty) {
+ assert(Kind >= DeclarationName::CXXConstructorName &&
+ Kind <= DeclarationName::CXXConversionFunctionName &&
+ "Kind must be a C++ special name kind");
+ assert(Ty->isCanonical() &&
+ "Can only build C++ special names from canonical types");
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames
+ = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+
+ DeclarationNameExtra::ExtraKind EKind;
+ switch (Kind) {
+ case DeclarationName::CXXConstructorName:
+ EKind = DeclarationNameExtra::CXXConstructor;
+ assert(Ty.getCVRQualifiers() == 0 &&"Constructor type must be unqualified");
+ break;
+ case DeclarationName::CXXDestructorName:
+ EKind = DeclarationNameExtra::CXXDestructor;
+ assert(Ty.getCVRQualifiers() == 0 && "Destructor type must be unqualified");
+ break;
+ case DeclarationName::CXXConversionFunctionName:
+ EKind = DeclarationNameExtra::CXXConversionFunction;
+ break;
+ default:
+ return DeclarationName();
+ }
+
+ // Unique selector, to guarantee there is one per name.
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(EKind);
+ ID.AddPointer(Ty.getAsOpaquePtr());
+
+ void *InsertPos = 0;
+ if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ CXXSpecialName *SpecialName = new CXXSpecialName;
+ SpecialName->ExtraKindOrNumArgs = EKind;
+ SpecialName->Type = Ty;
+ SpecialName->FETokenInfo = 0;
+
+ SpecialNames->InsertNode(SpecialName, InsertPos);
+ return DeclarationName(SpecialName);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
+ return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
+}
+
+unsigned
+llvm::DenseMapInfo<clang::DeclarationName>::
+getHashValue(clang::DeclarationName N) {
+ return DenseMapInfo<void*>::getHashValue(N.getAsOpaquePtr());
+}
+
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
new file mode 100644
index 0000000..4a53a41
--- /dev/null
+++ b/lib/AST/Expr.cpp
@@ -0,0 +1,2059 @@
+//===--- Expr.cpp - Expression AST Node Implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Primary Expressions.
+//===----------------------------------------------------------------------===//
+
+PredefinedExpr* PredefinedExpr::Clone(ASTContext &C) const {
+ return new (C) PredefinedExpr(Loc, getType(), Type);
+}
+
+IntegerLiteral* IntegerLiteral::Clone(ASTContext &C) const {
+ return new (C) IntegerLiteral(Value, getType(), Loc);
+}
+
+CharacterLiteral* CharacterLiteral::Clone(ASTContext &C) const {
+ return new (C) CharacterLiteral(Value, IsWide, getType(), Loc);
+}
+
+FloatingLiteral* FloatingLiteral::Clone(ASTContext &C) const {
+ bool exact = IsExact;
+ return new (C) FloatingLiteral(Value, &exact, getType(), Loc);
+}
+
+ImaginaryLiteral* ImaginaryLiteral::Clone(ASTContext &C) const {
+ // FIXME: Use virtual Clone(), once it is available
+ Expr *ClonedVal = 0;
+ if (const IntegerLiteral *IntLit = dyn_cast<IntegerLiteral>(Val))
+ ClonedVal = IntLit->Clone(C);
+ else
+ ClonedVal = cast<FloatingLiteral>(Val)->Clone(C);
+ return new (C) ImaginaryLiteral(ClonedVal, getType());
+}
+
+GNUNullExpr* GNUNullExpr::Clone(ASTContext &C) const {
+ return new (C) GNUNullExpr(getType(), TokenLoc);
+}
+
+/// getValueAsApproximateDouble - This returns the value as an inaccurate
+/// double. Note that this may cause loss of precision, but is useful for
+/// debugging dumps, etc.
+double FloatingLiteral::getValueAsApproximateDouble() const {
+ llvm::APFloat V = getValue();
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+StringLiteral *StringLiteral::Create(ASTContext &C, const char *StrData,
+ unsigned ByteLength, bool Wide,
+ QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumStrs) {
+ // Allocate enough space for the StringLiteral plus an array of locations for
+ // any concatenated string tokens.
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignof<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(Ty);
+
+ // OPTIMIZE: could allocate this appended to the StringLiteral.
+ char *AStrData = new (C, 1) char[ByteLength];
+ memcpy(AStrData, StrData, ByteLength);
+ SL->StrData = AStrData;
+ SL->ByteLength = ByteLength;
+ SL->IsWide = Wide;
+ SL->TokLocs[0] = Loc[0];
+ SL->NumConcatenated = NumStrs;
+
+ if (NumStrs != 1)
+ memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
+ return SL;
+}
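
Create uses the usual object-plus-trailing-array allocation: a single Allocate call sized for the StringLiteral plus NumStrs-1 extra SourceLocations, so the concatenated-token locations live directly behind the object. A self-contained sketch of the same idiom with a hypothetical type (not Clang's API):

    struct Packed {
      unsigned NumElems;
      int Elems[1];                   // really NumElems entries, allocated inline
    };

    Packed *createPacked(unsigned N) {
      void *Mem = ::operator new(sizeof(Packed) + sizeof(int) * (N - 1));
      Packed *P = static_cast<Packed *>(Mem);
      P->NumElems = N;
      return P;                       // release with ::operator delete(P)
    }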
+
+StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignof<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(QualType());
+ SL->StrData = 0;
+ SL->ByteLength = 0;
+ SL->NumConcatenated = NumStrs;
+ return SL;
+}
+
+StringLiteral* StringLiteral::Clone(ASTContext &C) const {
+ return Create(C, StrData, ByteLength, IsWide, getType(),
+ TokLocs, NumConcatenated);
+}
+
+void StringLiteral::Destroy(ASTContext &C) {
+ C.Deallocate(const_cast<char*>(StrData));
+ this->~StringLiteral();
+ C.Deallocate(this);
+}
+
+void StringLiteral::setStrData(ASTContext &C, const char *Str, unsigned Len) {
+ if (StrData)
+ C.Deallocate(const_cast<char*>(StrData));
+
+ char *AStrData = new (C, 1) char[Len];
+ memcpy(AStrData, Str, Len);
+ StrData = AStrData;
+ ByteLength = Len;
+}
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
+/// corresponds to, e.g. "&" or "[pre]++".
+const char *UnaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ default: assert(0 && "Unknown unary operator");
+ case PostInc: return "++";
+ case PostDec: return "--";
+ case PreInc: return "++";
+ case PreDec: return "--";
+ case AddrOf: return "&";
+ case Deref: return "*";
+ case Plus: return "+";
+ case Minus: return "-";
+ case Not: return "~";
+ case LNot: return "!";
+ case Real: return "__real";
+ case Imag: return "__imag";
+ case Extension: return "__extension__";
+ case OffsetOf: return "__builtin_offsetof";
+ }
+}
+
+UnaryOperator::Opcode
+UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
+ switch (OO) {
+ default: assert(false && "No unary operator for overloaded function");
+ case OO_PlusPlus: return Postfix ? PostInc : PreInc;
+ case OO_MinusMinus: return Postfix ? PostDec : PreDec;
+ case OO_Amp: return AddrOf;
+ case OO_Star: return Deref;
+ case OO_Plus: return Plus;
+ case OO_Minus: return Minus;
+ case OO_Tilde: return Not;
+ case OO_Exclaim: return LNot;
+ }
+}
+
+OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
+ switch (Opc) {
+ case PostInc: case PreInc: return OO_PlusPlus;
+ case PostDec: case PreDec: return OO_MinusMinus;
+ case AddrOf: return OO_Amp;
+ case Deref: return OO_Star;
+ case Plus: return OO_Plus;
+ case Minus: return OO_Minus;
+ case Not: return OO_Tilde;
+ case LNot: return OO_Exclaim;
+ default: return OO_None;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Postfix Operators.
+//===----------------------------------------------------------------------===//
+
+CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, Expr **args,
+ unsigned numargs, QualType t, SourceLocation rparenloc)
+ : Expr(SC, t,
+ fn->isTypeDependent() || hasAnyTypeDependentArguments(args, numargs),
+ fn->isValueDependent() || hasAnyValueDependentArguments(args,numargs)),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+1];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i)
+ SubExprs[i+ARGS_START] = args[i];
+
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
+ QualType t, SourceLocation rparenloc)
+ : Expr(CallExprClass, t,
+ fn->isTypeDependent() || hasAnyTypeDependentArguments(args, numargs),
+ fn->isValueDependent() || hasAnyValueDependentArguments(args,numargs)),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+1];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i)
+ SubExprs[i+ARGS_START] = args[i];
+
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext &C, EmptyShell Empty)
+ : Expr(CallExprClass, Empty), SubExprs(0), NumArgs(0) {
+ SubExprs = new (C) Stmt*[1];
+}
+
+void CallExpr::Destroy(ASTContext& C) {
+ DestroyChildren(C);
+ if (SubExprs) C.Deallocate(SubExprs);
+ this->~CallExpr();
+ C.Deallocate(this);
+}
+
+/// setNumArgs - This changes the number of arguments present in this call.
+/// Any orphaned expressions are deleted by this, and any new operands are set
+/// to null.
+void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
+ // No change, just return.
+ if (NumArgs == getNumArgs()) return;
+
+ // If shrinking the # of arguments, just delete the extras and forget them.
+ if (NumArgs < getNumArgs()) {
+ for (unsigned i = NumArgs, e = getNumArgs(); i != e; ++i)
+ getArg(i)->Destroy(C);
+ this->NumArgs = NumArgs;
+ return;
+ }
+
+ // Otherwise, we are growing the # arguments. Allocate a bigger argument
+ // array from the ASTContext, matching the constructors above.
+ Stmt **NewSubExprs = new (C) Stmt*[NumArgs+1];
+ // Copy over args.
+ for (unsigned i = 0; i != getNumArgs()+ARGS_START; ++i)
+ NewSubExprs[i] = SubExprs[i];
+ // Null out new args.
+ for (unsigned i = getNumArgs()+ARGS_START; i != NumArgs+ARGS_START; ++i)
+ NewSubExprs[i] = 0;
+
+ if (SubExprs) C.Deallocate(SubExprs);
+ SubExprs = NewSubExprs;
+ this->NumArgs = NumArgs;
+}
+
+/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
+/// not, return 0.
+unsigned CallExpr::isBuiltinCall(ASTContext &Context) const {
+ // All simple function calls (e.g. func()) are implicitly cast to pointer to
+ // function. As a result, we try to obtain the DeclRefExpr from the
+ // ImplicitCastExpr.
+ const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
+ if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
+ return 0;
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DRE)
+ return 0;
+
+ const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FDecl)
+ return 0;
+
+ if (!FDecl->getIdentifier())
+ return 0;
+
+ return FDecl->getBuiltinID(Context);
+}
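
The unwrapping above only looks through the single implicit function-to-pointer cast, so for a plain call the callee chain is (illustrative, not part of the patch):

    memcpy(dst, src, n);
    // CallExpr
    //  `-ImplicitCastExpr            (function-to-pointer decay)
    //     `-DeclRefExpr 'memcpy'     -> getBuiltinID(Context), nonzero if recognized as a builtin
    // whereas (memcpy)(dst, src, n) and (*memcpy)(dst, src, n) hit the FIXME path and return 0.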
+
+QualType CallExpr::getCallReturnType() const {
+ QualType CalleeType = getCallee()->getType();
+ if (const PointerType *FnTypePtr = CalleeType->getAsPointerType())
+ CalleeType = FnTypePtr->getPointeeType();
+ else if (const BlockPointerType *BPT = CalleeType->getAsBlockPointerType())
+ CalleeType = BPT->getPointeeType();
+
+ const FunctionType *FnType = CalleeType->getAsFunctionType();
+ return FnType->getResultType();
+}
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
+/// corresponds to, e.g. "<<=".
+const char *BinaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case PtrMemD: return ".*";
+ case PtrMemI: return "->*";
+ case Mul: return "*";
+ case Div: return "/";
+ case Rem: return "%";
+ case Add: return "+";
+ case Sub: return "-";
+ case Shl: return "<<";
+ case Shr: return ">>";
+ case LT: return "<";
+ case GT: return ">";
+ case LE: return "<=";
+ case GE: return ">=";
+ case EQ: return "==";
+ case NE: return "!=";
+ case And: return "&";
+ case Xor: return "^";
+ case Or: return "|";
+ case LAnd: return "&&";
+ case LOr: return "||";
+ case Assign: return "=";
+ case MulAssign: return "*=";
+ case DivAssign: return "/=";
+ case RemAssign: return "%=";
+ case AddAssign: return "+=";
+ case SubAssign: return "-=";
+ case ShlAssign: return "<<=";
+ case ShrAssign: return ">>=";
+ case AndAssign: return "&=";
+ case XorAssign: return "^=";
+ case OrAssign: return "|=";
+ case Comma: return ",";
+ }
+
+ return "";
+}
+
+BinaryOperator::Opcode
+BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
+ switch (OO) {
+ default: assert(false && "Not an overloadable binary operator");
+ case OO_Plus: return Add;
+ case OO_Minus: return Sub;
+ case OO_Star: return Mul;
+ case OO_Slash: return Div;
+ case OO_Percent: return Rem;
+ case OO_Caret: return Xor;
+ case OO_Amp: return And;
+ case OO_Pipe: return Or;
+ case OO_Equal: return Assign;
+ case OO_Less: return LT;
+ case OO_Greater: return GT;
+ case OO_PlusEqual: return AddAssign;
+ case OO_MinusEqual: return SubAssign;
+ case OO_StarEqual: return MulAssign;
+ case OO_SlashEqual: return DivAssign;
+ case OO_PercentEqual: return RemAssign;
+ case OO_CaretEqual: return XorAssign;
+ case OO_AmpEqual: return AndAssign;
+ case OO_PipeEqual: return OrAssign;
+ case OO_LessLess: return Shl;
+ case OO_GreaterGreater: return Shr;
+ case OO_LessLessEqual: return ShlAssign;
+ case OO_GreaterGreaterEqual: return ShrAssign;
+ case OO_EqualEqual: return EQ;
+ case OO_ExclaimEqual: return NE;
+ case OO_LessEqual: return LE;
+ case OO_GreaterEqual: return GE;
+ case OO_AmpAmp: return LAnd;
+ case OO_PipePipe: return LOr;
+ case OO_Comma: return Comma;
+ case OO_ArrowStar: return PtrMemI;
+ }
+}
+
+OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
+ static const OverloadedOperatorKind OverOps[] = {
+ /* .* Cannot be overloaded */OO_None, OO_ArrowStar,
+ OO_Star, OO_Slash, OO_Percent,
+ OO_Plus, OO_Minus,
+ OO_LessLess, OO_GreaterGreater,
+ OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
+ OO_EqualEqual, OO_ExclaimEqual,
+ OO_Amp,
+ OO_Caret,
+ OO_Pipe,
+ OO_AmpAmp,
+ OO_PipePipe,
+ OO_Equal, OO_StarEqual,
+ OO_SlashEqual, OO_PercentEqual,
+ OO_PlusEqual, OO_MinusEqual,
+ OO_LessLessEqual, OO_GreaterGreaterEqual,
+ OO_AmpEqual, OO_CaretEqual,
+ OO_PipeEqual,
+ OO_Comma
+ };
+ return OverOps[Opc];
+}
+
+InitListExpr::InitListExpr(SourceLocation lbraceloc,
+ Expr **initExprs, unsigned numInits,
+ SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(),
+ hasAnyTypeDependentArguments(initExprs, numInits),
+ hasAnyValueDependentArguments(initExprs, numInits)),
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0),
+ UnionFieldInit(0), HadArrayRangeDesignator(false) {
+
+ InitExprs.insert(InitExprs.end(), initExprs, initExprs+numInits);
+}
+
+void InitListExpr::reserveInits(unsigned NumInits) {
+ if (NumInits > InitExprs.size())
+ InitExprs.reserve(NumInits);
+}
+
+void InitListExpr::resizeInits(ASTContext &Context, unsigned NumInits) {
+ for (unsigned Idx = NumInits, LastIdx = InitExprs.size();
+ Idx < LastIdx; ++Idx)
+ InitExprs[Idx]->Destroy(Context);
+ InitExprs.resize(NumInits, 0);
+}
+
+Expr *InitListExpr::updateInit(unsigned Init, Expr *expr) {
+ if (Init >= InitExprs.size()) {
+ InitExprs.insert(InitExprs.end(), Init - InitExprs.size() + 1, 0);
+ InitExprs.back() = expr;
+ return 0;
+ }
+
+ Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
+ InitExprs[Init] = expr;
+ return Result;
+}
+
+/// getFunctionType - Return the underlying function type for this block.
+///
+const FunctionType *BlockExpr::getFunctionType() const {
+ return getType()->getAsBlockPointerType()->
+ getPointeeType()->getAsFunctionType();
+}
+
+SourceLocation BlockExpr::getCaretLocation() const {
+ return TheBlock->getCaretLocation();
+}
+const Stmt *BlockExpr::getBody() const {
+ return TheBlock->getBody();
+}
+Stmt *BlockExpr::getBody() {
+ return TheBlock->getBody();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Generic Expression Routines
+//===----------------------------------------------------------------------===//
+
+/// isUnusedResultAWarning - Return true if this immediate expression should
+/// be warned about if the result is unused. If so, fill in Loc and Ranges
+/// with location to warn on and the source range[s] to report with the
+/// warning.
+bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
+ SourceRange &R2) const {
+ // Don't warn if the expr is type dependent. The type could end up
+ // instantiating to void.
+ if (isTypeDependent())
+ return false;
+
+ switch (getStmtClass()) {
+ default:
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->
+ isUnusedResultAWarning(Loc, R1, R2);
+ case UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(this);
+
+ switch (UO->getOpcode()) {
+ default: break;
+ case UnaryOperator::PostInc:
+ case UnaryOperator::PostDec:
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec: // ++/--
+ return false; // Not a warning.
+ case UnaryOperator::Deref:
+ // Dereferencing a volatile pointer is a side-effect.
+ if (getType().isVolatileQualified())
+ return false;
+ break;
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ // accessing a piece of a volatile complex is a side-effect.
+ if (UO->getSubExpr()->getType().isVolatileQualified())
+ return false;
+ break;
+ case UnaryOperator::Extension:
+ return UO->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2);
+ }
+ Loc = UO->getOperatorLoc();
+ R1 = UO->getSubExpr()->getSourceRange();
+ return true;
+ }
+ case BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(this);
+ // Consider comma to have side effects if the LHS or RHS does.
+ if (BO->getOpcode() == BinaryOperator::Comma)
+ return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2) ||
+ BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2);
+
+ if (BO->isAssignmentOp())
+ return false;
+ Loc = BO->getOperatorLoc();
+ R1 = BO->getLHS()->getSourceRange();
+ R2 = BO->getRHS()->getSourceRange();
+ return true;
+ }
+ case CompoundAssignOperatorClass:
+ return false;
+
+ case ConditionalOperatorClass: {
+ // The condition must be evaluated, but if either the LHS or RHS is a
+ // warning, warn about them.
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
+ if (Exp->getLHS() && Exp->getLHS()->isUnusedResultAWarning(Loc, R1, R2))
+ return true;
+ return Exp->getRHS()->isUnusedResultAWarning(Loc, R1, R2);
+ }
+
+ case MemberExprClass:
+ // If the base pointer or element is to a volatile pointer/field, accessing
+ // it is a side effect.
+ if (getType().isVolatileQualified())
+ return false;
+ Loc = cast<MemberExpr>(this)->getMemberLoc();
+ R1 = SourceRange(Loc, Loc);
+ R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
+ return true;
+
+ case ArraySubscriptExprClass:
+ // If the base pointer or element is to a volatile pointer/field, accessing
+ // it is a side effect.
+ if (getType().isVolatileQualified())
+ return false;
+ Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
+ R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
+ R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
+ return true;
+
+ case CallExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass: {
+ // If this is a direct call, get the callee.
+ const CallExpr *CE = cast<CallExpr>(this);
+ const Expr *CalleeExpr = CE->getCallee()->IgnoreParenCasts();
+ if (const DeclRefExpr *CalleeDRE = dyn_cast<DeclRefExpr>(CalleeExpr)) {
+ // If the callee has attribute pure, const, or warn_unused_result, warn
+ // about it. void foo() { strlen("bar"); } should warn.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeDRE->getDecl()))
+ if (FD->getAttr<WarnUnusedResultAttr>() ||
+ FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+ Loc = CE->getCallee()->getLocStart();
+ R1 = CE->getCallee()->getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getLocStart(),
+ CE->getArg(NumArgs-1)->getLocEnd());
+ return true;
+ }
+ }
+ return false;
+ }
+ case ObjCMessageExprClass:
+ return false;
+ case StmtExprClass: {
+ // Statement exprs don't logically have side effects themselves, but are
+ // sometimes used in macros in ways that give them a type that is unused.
+ // For example, ({ blah; foo(); }) will end up with a type if foo has a
+ // type. However, if the result of the stmt expr is dead, we don't want to
+ // emit a warning.
+ const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
+ if (!CS->body_empty())
+ if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
+ return E->isUnusedResultAWarning(Loc, R1, R2);
+
+ Loc = cast<StmtExpr>(this)->getLParenLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+ case CStyleCastExprClass:
+ // If this is a cast to void, check the operand. Otherwise, the result of
+ // the cast is unused.
+ if (getType()->isVoidType())
+ return cast<CastExpr>(this)->getSubExpr()->isUnusedResultAWarning(Loc,
+ R1, R2);
+ Loc = cast<CStyleCastExpr>(this)->getLParenLoc();
+ R1 = cast<CStyleCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+ case CXXFunctionalCastExprClass:
+ // If this is a cast to void, check the operand. Otherwise, the result of
+ // the cast is unused.
+ if (getType()->isVoidType())
+ return cast<CastExpr>(this)->getSubExpr()->isUnusedResultAWarning(Loc,
+ R1, R2);
+ Loc = cast<CXXFunctionalCastExpr>(this)->getTypeBeginLoc();
+ R1 = cast<CXXFunctionalCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+
+ case ImplicitCastExprClass:
+ // Check the operand, since implicit casts are inserted by Sema
+ return cast<ImplicitCastExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2);
+
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)
+ ->getExpr()->isUnusedResultAWarning(Loc, R1, R2);
+
+ case CXXNewExprClass:
+ // FIXME: In theory, there might be new expressions that don't have side
+ // effects (e.g. a placement new with an uninitialized POD).
+ case CXXDeleteExprClass:
+ return false;
+ case CXXExprWithTemporariesClass:
+ return cast<CXXExprWithTemporaries>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2);
+ }
+}
+
+/// DeclCanBeLvalue - Determine whether the given declaration can be
+/// an lvalue. This is a helper routine for isLvalue.
+static bool DeclCanBeLvalue(const NamedDecl *Decl, ASTContext &Ctx) {
+ // C++ [temp.param]p6:
+ // A non-type non-reference template-parameter is not an lvalue.
+ if (const NonTypeTemplateParmDecl *NTTParm
+ = dyn_cast<NonTypeTemplateParmDecl>(Decl))
+ return NTTParm->getType()->isReferenceType();
+
+ return isa<VarDecl>(Decl) || isa<FieldDecl>(Decl) ||
+ // C++ 3.10p2: An lvalue refers to an object or function.
+ (Ctx.getLangOptions().CPlusPlus &&
+ (isa<FunctionDecl>(Decl) || isa<OverloadedFunctionDecl>(Decl)));
+}
+
+/// isLvalue - C99 6.3.2.1: an lvalue is an expression with an object type or an
+/// incomplete type other than void. Nonarray expressions that can be lvalues:
+/// - name, where name must be a variable
+/// - e[i]
+/// - (e), where e must be an lvalue
+/// - e.name, where e must be an lvalue
+/// - e->name
+/// - *e, the type of e cannot be a function type
+/// - string-constant
+/// - (__real__ e) and (__imag__ e) where e is an lvalue [GNU extension]
+/// - reference type [C++ [expr]]
+///
+Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ isLvalueResult Res = isLvalueInternal(Ctx);
+ if (Res != LV_Valid || Ctx.getLangOptions().CPlusPlus)
+ return Res;
+
+ // first, check the type (C99 6.3.2.1). Expressions with function
+ // type in C are not lvalues, but they can be lvalues in C++.
+ if (TR->isFunctionType())
+ return LV_NotObjectType;
+
+ // Allow qualified void which is an incomplete type other than void (yuck).
+ if (TR->isVoidType() && !Ctx.getCanonicalType(TR).getCVRQualifiers())
+ return LV_IncompleteVoidType;
+
+ return LV_Valid;
+}
+
+// Check whether the expression can be sanely treated like an l-value
+Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
+ switch (getStmtClass()) {
+ case StringLiteralClass: // C99 6.5.1p4
+ case ObjCEncodeExprClass: // @encode behaves like its string in every way.
+ return LV_Valid;
+ case ArraySubscriptExprClass: // C99 6.5.3p4 (e1[e2] == (*((e1)+(e2))))
+ // For vectors, make sure base is an lvalue (i.e. not a function call).
+ if (cast<ArraySubscriptExpr>(this)->getBase()->getType()->isVectorType())
+ return cast<ArraySubscriptExpr>(this)->getBase()->isLvalue(Ctx);
+ return LV_Valid;
+ case DeclRefExprClass:
+ case QualifiedDeclRefExprClass: { // C99 6.5.1p2
+ const NamedDecl *RefdDecl = cast<DeclRefExpr>(this)->getDecl();
+ if (DeclCanBeLvalue(RefdDecl, Ctx))
+ return LV_Valid;
+ break;
+ }
+ case BlockDeclRefExprClass: {
+ const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(this);
+ if (isa<VarDecl>(BDR->getDecl()))
+ return LV_Valid;
+ break;
+ }
+ case MemberExprClass: {
+ const MemberExpr *m = cast<MemberExpr>(this);
+ if (Ctx.getLangOptions().CPlusPlus) { // C++ [expr.ref]p4:
+ NamedDecl *Member = m->getMemberDecl();
+ // C++ [expr.ref]p4:
+ // If E2 is declared to have type "reference to T", then E1.E2
+ // is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return LV_Valid;
+
+ // -- If E2 is a static data member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return LV_Valid;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an
+ // lvalue, then E1.E2 is an lvalue.
+ if (isa<FieldDecl>(Member))
+ return m->isArrow() ? LV_Valid : m->getBase()->isLvalue(Ctx);
+
+ // -- If it refers to a static member function [...], then
+ // E1.E2 is an lvalue.
+ // -- Otherwise, if E1.E2 refers to a non-static member
+ // function [...], then E1.E2 is not an lvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic()? LV_Valid : LV_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2
+ // is not an lvalue.
+ if (isa<EnumConstantDecl>(Member))
+ return LV_InvalidExpression;
+
+ // Not an lvalue.
+ return LV_InvalidExpression;
+ }
+
+ // C99 6.5.2.3p4
+ return m->isArrow() ? LV_Valid : m->getBase()->isLvalue(Ctx);
+ }
+ case UnaryOperatorClass:
+ if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Deref)
+ return LV_Valid; // C99 6.5.3p4
+
+ if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Real ||
+ cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Imag ||
+ cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Extension)
+ return cast<UnaryOperator>(this)->getSubExpr()->isLvalue(Ctx); // GNU.
+
+ if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.pre.incr]p1
+ (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreInc ||
+ cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreDec))
+ return LV_Valid;
+ break;
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(this)->isLvalueCast()? LV_Valid
+ : LV_InvalidExpression;
+ case ParenExprClass: // C99 6.5.1p5
+ return cast<ParenExpr>(this)->getSubExpr()->isLvalue(Ctx);
+ case BinaryOperatorClass:
+ case CompoundAssignOperatorClass: {
+ const BinaryOperator *BinOp = cast<BinaryOperator>(this);
+
+ if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.comma]p1
+ BinOp->getOpcode() == BinaryOperator::Comma)
+ return BinOp->getRHS()->isLvalue(Ctx);
+
+ // C++ [expr.mptr.oper]p6
+ if ((BinOp->getOpcode() == BinaryOperator::PtrMemD ||
+ BinOp->getOpcode() == BinaryOperator::PtrMemI) &&
+ !BinOp->getType()->isFunctionType())
+ return BinOp->getLHS()->isLvalue(Ctx);
+
+ if (!BinOp->isAssignmentOp())
+ return LV_InvalidExpression;
+
+ if (Ctx.getLangOptions().CPlusPlus)
+ // C++ [expr.ass]p1:
+ // The result of an assignment operation [...] is an lvalue.
+ return LV_Valid;
+
+
+ // C99 6.5.16:
+ // An assignment expression [...] is not an lvalue.
+ return LV_InvalidExpression;
+ }
+ case CallExprClass:
+ case CXXOperatorCallExprClass:
+ case CXXMemberCallExprClass: {
+ // C++0x [expr.call]p10
+ // A function call is an lvalue if and only if the result type
+ // is an lvalue reference.
+ QualType ReturnType = cast<CallExpr>(this)->getCallReturnType();
+ if (ReturnType->isLValueReferenceType())
+ return LV_Valid;
+
+ break;
+ }
+ case CompoundLiteralExprClass: // C99 6.5.2.5p5
+ return LV_Valid;
+ case ChooseExprClass:
+ // __builtin_choose_expr is an lvalue if the selected operand is.
+ return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)->isLvalue(Ctx);
+ case ExtVectorElementExprClass:
+ if (cast<ExtVectorElementExpr>(this)->containsDuplicateElements())
+ return LV_DuplicateVectorComponents;
+ return LV_Valid;
+ case ObjCIvarRefExprClass: // ObjC instance variables are lvalues.
+ return LV_Valid;
+ case ObjCPropertyRefExprClass: // FIXME: check if read-only property.
+ return LV_Valid;
+ case ObjCKVCRefExprClass: // FIXME: check if read-only property.
+ return LV_Valid;
+ case PredefinedExprClass:
+ return LV_Valid;
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)->getExpr()->isLvalue(Ctx);
+ case CXXConditionDeclExprClass:
+ return LV_Valid;
+ case CStyleCastExprClass:
+ case CXXFunctionalCastExprClass:
+ case CXXStaticCastExprClass:
+ case CXXDynamicCastExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass:
+ // The result of an explicit cast is an lvalue if the type we are
+ // casting to is an lvalue reference type. See C++ [expr.cast]p1,
+ // C++ [expr.static.cast]p2, C++ [expr.dynamic.cast]p2,
+ // C++ [expr.reinterpret.cast]p1, C++ [expr.const.cast]p1.
+ if (cast<ExplicitCastExpr>(this)->getTypeAsWritten()->
+ isLValueReferenceType())
+ return LV_Valid;
+ break;
+ case CXXTypeidExprClass:
+ // C++ 5.2.8p1: The result of a typeid expression is an lvalue of ...
+ return LV_Valid;
+ case ConditionalOperatorClass: {
+ // Complicated handling is only for C++.
+ if (!Ctx.getLangOptions().CPlusPlus)
+ return LV_InvalidExpression;
+
+ // Sema should have taken care to ensure that a CXXTemporaryObjectExpr is
+ // created everywhere an object is converted to an rvalue. Also, any other
+ // casts should be wrapped by ImplicitCastExprs. There's just the special
+ // case involving throws to work out.
+ const ConditionalOperator *Cond = cast<ConditionalOperator>(this);
+ Expr *True = Cond->getTrueExpr();
+ Expr *False = Cond->getFalseExpr();
+ // C++0x 5.16p2
+ // If either the second or the third operand has type (cv) void, [...]
+ // the result [...] is an rvalue.
+ if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ return LV_InvalidExpression;
+
+ // Both sides must be lvalues for the result to be an lvalue.
+ if (True->isLvalue(Ctx) != LV_Valid || False->isLvalue(Ctx) != LV_Valid)
+ return LV_InvalidExpression;
+
+ // That's it.
+ return LV_Valid;
+ }
+
+ default:
+ break;
+ }
+ return LV_InvalidExpression;
+}
+
+/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
+/// does not have an incomplete type, does not have a const-qualified type, and
+/// if it is a structure or union, does not have any member (including,
+/// recursively, any member or element of all contained aggregates or unions)
+/// with a const-qualified type.
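+/// For example, assigning to a const-qualified variable yields
+/// MLV_ConstQualified, and assigning to a variable captured by value inside
+/// a block yields MLV_NotBlockQualified.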
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ isLvalueResult lvalResult = isLvalue(Ctx);
+
+ switch (lvalResult) {
+ case LV_Valid:
+ // C++ 3.10p11: Functions cannot be modified, but pointers to
+ // functions can be modifiable.
+ if (Ctx.getLangOptions().CPlusPlus && TR->isFunctionType())
+ return MLV_NotObjectType;
+ break;
+
+ case LV_NotObjectType: return MLV_NotObjectType;
+ case LV_IncompleteVoidType: return MLV_IncompleteVoidType;
+ case LV_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case LV_InvalidExpression:
+ // If the top level is a C-style cast, and the subexpression is a valid
+ // lvalue, then this is probably a use of the old-school "cast as lvalue"
+ // GCC extension. We don't support it, but we want to produce good
+ // diagnostics when it happens so that the user knows why.
+ if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(IgnoreParens())) {
+ if (CE->getSubExpr()->isLvalue(Ctx) == LV_Valid) {
+ if (Loc)
+ *Loc = CE->getLParenLoc();
+ return MLV_LValueCast;
+ }
+ }
+ return MLV_InvalidExpression;
+ case LV_MemberFunction: return MLV_MemberFunction;
+ }
+
+ // The following is illegal:
+ // void takeclosure(void (^C)(void));
+ // void func() { int x = 1; takeclosure(^{ x = 7; }); }
+ //
+ if (isa<BlockDeclRefExpr>(this)) {
+ const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(this);
+ if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
+ return MLV_NotBlockQualified;
+ }
+
+ QualType CT = Ctx.getCanonicalType(getType());
+
+ if (CT.isConstQualified())
+ return MLV_ConstQualified;
+ if (CT->isArrayType())
+ return MLV_ArrayType;
+ if (CT->isIncompleteType())
+ return MLV_IncompleteType;
+
+ if (const RecordType *r = CT->getAsRecordType()) {
+ if (r->hasConstFields())
+ return MLV_ConstQualified;
+ }
+
+ // Assigning to an 'implicit' property?
+ else if (isa<ObjCKVCRefExpr>(this)) {
+ const ObjCKVCRefExpr* KVCExpr = cast<ObjCKVCRefExpr>(this);
+ if (KVCExpr->getSetterMethod() == 0)
+ return MLV_NoSetterProperty;
+ }
+ return MLV_Valid;
+}
+
+/// hasGlobalStorage - Return true if this expression has static storage
+/// duration. This means that the address of this expression is a link-time
+/// constant.
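+/// For example, references to global variables, functions, file-scope
+/// compound literals, and __func__ have global storage; references to local
+/// variables do not.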
+bool Expr::hasGlobalStorage() const {
+ switch (getStmtClass()) {
+ default:
+ return false;
+ case BlockExprClass:
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->hasGlobalStorage();
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(this)->getSubExpr()->hasGlobalStorage();
+ case CompoundLiteralExprClass:
+ return cast<CompoundLiteralExpr>(this)->isFileScope();
+ case DeclRefExprClass:
+ case QualifiedDeclRefExprClass: {
+ const Decl *D = cast<DeclRefExpr>(this)->getDecl();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->hasGlobalStorage();
+ if (isa<FunctionDecl>(D))
+ return true;
+ return false;
+ }
+ case MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(this);
+ return !M->isArrow() && M->getBase()->hasGlobalStorage();
+ }
+ case ArraySubscriptExprClass:
+ return cast<ArraySubscriptExpr>(this)->getBase()->hasGlobalStorage();
+ case PredefinedExprClass:
+ return true;
+ case CXXDefaultArgExprClass:
+ return cast<CXXDefaultArgExpr>(this)->getExpr()->hasGlobalStorage();
+ }
+}
+
+/// isOBJCGCCandidate - Check if an expression is objc gc'able.
+///
+bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
+ switch (getStmtClass()) {
+ default:
+ return false;
+ case ObjCIvarRefExprClass:
+ return true;
+ case Expr::UnaryOperatorClass:
+ return cast<UnaryOperator>(this)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(this)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case CStyleCastExprClass:
+ return cast<CStyleCastExpr>(this)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case DeclRefExprClass:
+ case QualifiedDeclRefExprClass: {
+ const Decl *D = cast<DeclRefExpr>(this)->getDecl();
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasGlobalStorage())
+ return true;
+ QualType T = VD->getType();
+ // Dereferencing an Objective-C object pointer is always a GC'able candidate.
+ if (T->isPointerType() &&
+ Ctx.isObjCObjectPointerType(T->getAsPointerType()->getPointeeType()))
+ return true;
+
+ }
+ return false;
+ }
+ case MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(this);
+ return M->getBase()->isOBJCGCCandidate(Ctx);
+ }
+ case ArraySubscriptExprClass:
+ return cast<ArraySubscriptExpr>(this)->getBase()->isOBJCGCCandidate(Ctx);
+ }
+}
+Expr* Expr::IgnoreParens() {
+ Expr* E = this;
+ while (ParenExpr* P = dyn_cast<ParenExpr>(E))
+ E = P->getSubExpr();
+
+ return E;
+}
+
+/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
+/// or CastExprs or ImplicitCastExprs, returning their operand.
+Expr *Expr::IgnoreParenCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E))
+ E = P->getSubExpr();
+ else if (CastExpr *P = dyn_cast<CastExpr>(E))
+ E = P->getSubExpr();
+ else
+ return E;
+ }
+}
+
+/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
+/// value (including ptr->int casts of the same size). Strip off any
+/// ParenExpr or CastExprs, returning their operand.
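+/// For example, "(void *)(char *)P" is stripped down to "P", since both
+/// casts are pointer-to-pointer casts of the same width.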
+Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ // We ignore integer <-> integer casts of the same width, ptr <-> ptr and
+ // ptr <-> int casts of the same width. We also ignore all identity casts.
+ Expr *SE = P->getSubExpr();
+
+ if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
+ E = SE;
+ continue;
+ }
+
+ if ((E->getType()->isPointerType() || E->getType()->isIntegralType()) &&
+ (SE->getType()->isPointerType() || SE->getType()->isIntegralType()) &&
+ Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
+ E = SE;
+ continue;
+ }
+ }
+
+ return E;
+ }
+}
+
+
+/// hasAnyTypeDependentArguments - Determines if any of the expressions
+/// in Exprs is type-dependent.
+bool Expr::hasAnyTypeDependentArguments(Expr** Exprs, unsigned NumExprs) {
+ for (unsigned I = 0; I < NumExprs; ++I)
+ if (Exprs[I]->isTypeDependent())
+ return true;
+
+ return false;
+}
+
+/// hasAnyValueDependentArguments - Determines if any of the expressions
+/// in Exprs is value-dependent.
+bool Expr::hasAnyValueDependentArguments(Expr** Exprs, unsigned NumExprs) {
+ for (unsigned I = 0; I < NumExprs; ++I)
+ if (Exprs[I]->isValueDependent())
+ return true;
+
+ return false;
+}
+
+bool Expr::isConstantInitializer(ASTContext &Ctx) const {
+ // This function determines whether an expression is an initializer
+ // which can be evaluated at compile time. isEvaluatable handles most
+ // of the cases, but it can't deal with some initializer-specific
+ // expressions, and it can't deal with aggregates; we deal with those here,
+ // and fall back to isEvaluatable for the other cases.
+
+ // FIXME: This function assumes the variable being assigned to
+ // isn't a reference type!
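+ //
+ // For example, an aggregate initializer such as "int xs[3] = {1, 2, 3}"
+ // is accepted here even though it is not itself an integer constant
+ // expression.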
+
+ switch (getStmtClass()) {
+ default: break;
+ case StringLiteralClass:
+ case ObjCEncodeExprClass:
+ return true;
+ case CompoundLiteralExprClass: {
+ // This handles gcc's extension that allows global initializers like
+ // "struct x {int x;} x = (struct x) {};".
+ // FIXME: This accepts other cases it shouldn't!
+ const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
+ return Exp->isConstantInitializer(Ctx);
+ }
+ case InitListExprClass: {
+ // FIXME: This doesn't deal with fields with reference types correctly.
+ // FIXME: This incorrectly allows pointers cast to integers to be assigned
+ // to bitfields.
+ const InitListExpr *Exp = cast<InitListExpr>(this);
+ unsigned numInits = Exp->getNumInits();
+ for (unsigned i = 0; i < numInits; i++) {
+ if (!Exp->getInit(i)->isConstantInitializer(Ctx))
+ return false;
+ }
+ return true;
+ }
+ case ImplicitValueInitExprClass:
+ return true;
+ case ParenExprClass: {
+ return cast<ParenExpr>(this)->getSubExpr()->isConstantInitializer(Ctx);
+ }
+ case UnaryOperatorClass: {
+ const UnaryOperator* Exp = cast<UnaryOperator>(this);
+ if (Exp->getOpcode() == UnaryOperator::Extension)
+ return Exp->getSubExpr()->isConstantInitializer(Ctx);
+ break;
+ }
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass:
+ // Handle casts with a destination that's a struct or union; this
+ // deals with both the gcc no-op struct cast extension and the
+ // cast-to-union extension.
+ if (getType()->isRecordType())
+ return cast<CastExpr>(this)->getSubExpr()->isConstantInitializer(Ctx);
+ break;
+ }
+
+ return isEvaluatable(Ctx);
+}
+
+/// isIntegerConstantExpr - This recursive routine tests whether an expression
+/// is an integer constant expression.
+
+/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
+/// comma, etc.
+///
+/// FIXME: Handle offsetof. Two things to do: Handle GCC's __builtin_offsetof
+/// to support gcc 4.0+ and handle the idiom GCC recognizes with a null pointer
+/// cast+dereference.
+
+// CheckICE - This function does the fundamental ICE checking: the returned
+// ICEDiag contains a Val of 0, 1, or 2, and a possibly null SourceLocation.
+// Note that to reduce code duplication, this helper does no evaluation
+// itself; the caller checks whether the expression is evaluatable, and
+// in the rare cases where CheckICE actually cares about the evaluated
+// value, it calls into Evaluate.
+//
+// Meanings of Val:
+// 0: This expression is an ICE if it can be evaluated by Evaluate.
+// 1: This expression is not an ICE, but if it isn't evaluated, it's
+// a legal subexpression for an ICE. This return value is used to handle
+// the comma operator in C99 mode.
+// 2: This expression is not an ICE, and is not a legal subexpression for one.
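+//
+// For example, in C99 "0 ? (1, 2) : 3" is an integer constant expression
+// because the comma operand is never evaluated, whereas "1 ? (1, 2) : 3"
+// is not, since its comma side is the one selected.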
+
+struct ICEDiag {
+ unsigned Val;
+ SourceLocation Loc;
+
+ public:
+ ICEDiag(unsigned v, SourceLocation l) : Val(v), Loc(l) {}
+ ICEDiag() : Val(0) {}
+};
+
+ICEDiag NoDiag() { return ICEDiag(); }
+
+static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
+ Expr::EvalResult EVResult;
+ if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+}
+
+static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
+ assert(!E->isValueDependent() && "Should not see value dependent exprs!");
+ if (!E->getType()->isIntegralType()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+
+ switch (E->getStmtClass()) {
+ default:
+ return ICEDiag(2, E->getLocStart());
+ case Expr::ParenExprClass:
+ return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXZeroInitValueExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ return NoDiag();
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall(Ctx))
+ return CheckEvalInICE(E, Ctx);
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::DeclRefExprClass:
+ case Expr::QualifiedDeclRefExprClass:
+ if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ return NoDiag();
+ if (Ctx.getLangOptions().CPlusPlus &&
+ E->getType().getCVRQualifiers() == QualType::Const) {
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
+ if (const VarDecl *Dcl =
+ dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
+ if (Dcl->isInitKnownICE()) {
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ if (Dcl->isInitICE())
+ return NoDiag();
+ else
+ return ICEDiag(2, E->getLocStart());
+ }
+
+ if (const Expr *Init = Dcl->getInit()) {
+ ICEDiag Result = CheckICE(Init, Ctx);
+ // Cache the result of the ICE test.
+ Dcl->setInitKnownICE(Ctx, Result.Val == 0);
+ return Result;
+ }
+ }
+ }
+ return ICEDiag(2, E->getLocStart());
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *Exp = cast<UnaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ default:
+ return ICEDiag(2, E->getLocStart());
+ case UnaryOperator::Extension:
+ case UnaryOperator::LNot:
+ case UnaryOperator::Plus:
+ case UnaryOperator::Minus:
+ case UnaryOperator::Not:
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ return CheckICE(Exp->getSubExpr(), Ctx);
+ case UnaryOperator::OffsetOf:
+ // Note that per C99, offsetof must be an ICE. And AFAIK, using
+ // Evaluate matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[!.0])". This doesn't affect
+ // compliance: we should warn earlier for offsetof expressions with
+ // array subscripts that aren't ICEs, and if the array subscripts
+ // are ICEs, the value of the offsetof must be an integer constant.
+ return CheckEvalInICE(E, Ctx);
+ }
+ }
+ case Expr::SizeOfAlignOfExprClass: {
+ const SizeOfAlignOfExpr *Exp = cast<SizeOfAlignOfExpr>(E);
+ if (Exp->isSizeOf() && Exp->getTypeOfArgument()->isVariableArrayType())
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *Exp = cast<BinaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ default:
+ return ICEDiag(2, E->getLocStart());
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ case BinaryOperator::Comma: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (Exp->getOpcode() == BinaryOperator::Div ||
+ Exp->getOpcode() == BinaryOperator::Rem) {
+ // Evaluate gives an error for undefined Div/Rem, so make sure
+ // we don't evaluate one.
+ if (LHSResult.Val != 2 && RHSResult.Val != 2) {
+ llvm::APSInt REval = Exp->getRHS()->EvaluateAsInt(Ctx);
+ if (REval == 0)
+ return ICEDiag(1, E->getLocStart());
+ if (REval.isSigned() && REval.isAllOnesValue()) {
+ llvm::APSInt LEval = Exp->getLHS()->EvaluateAsInt(Ctx);
+ if (LEval.isMinSignedValue())
+ return ICEDiag(1, E->getLocStart());
+ }
+ }
+ }
+ if (Exp->getOpcode() == BinaryOperator::Comma) {
+ if (Ctx.getLangOptions().C99) {
+ // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
+ // if it isn't evaluated.
+ if (LHSResult.Val == 0 && RHSResult.Val == 0)
+ return ICEDiag(1, E->getLocStart());
+ } else {
+ // In both C89 and C++, commas in ICEs are illegal.
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (LHSResult.Val == 0 && RHSResult.Val == 1) {
+ // Rare case where the RHS has a comma "side-effect"; we need
+ // to actually check the condition to see whether the side
+ // with the comma is evaluated.
+ if ((Exp->getOpcode() == BinaryOperator::LAnd) !=
+ (Exp->getLHS()->EvaluateAsInt(Ctx) == 0))
+ return RHSResult;
+ return NoDiag();
+ }
+
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ }
+ }
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass: {
+ const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (SubExpr->getType()->isIntegralType())
+ return CheckICE(SubExpr, Ctx);
+ if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
+ return NoDiag();
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // then only the true side is actually considered in an integer constant
+ // expression, and it is fully evaluated. This is an important GNU
+ // extension. See GCC PR38377 for discussion.
+ if (const CallExpr *CallCE = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall(Ctx) == Builtin::BI__builtin_constant_p) {
+ Expr::EvalResult EVResult;
+ if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+ }
+ ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (CondResult.Val == 2)
+ return CondResult;
+ if (TrueResult.Val == 2)
+ return TrueResult;
+ if (FalseResult.Val == 2)
+ return FalseResult;
+ if (CondResult.Val == 1)
+ return CondResult;
+ if (TrueResult.Val == 0 && FalseResult.Val == 0)
+ return NoDiag();
+ // Rare case where the diagnostics depend on which side is evaluated
+ // Note that if we get here, CondResult is 0, and at least one of
+ // TrueResult and FalseResult is non-zero.
+ if (Exp->getCond()->EvaluateAsInt(Ctx) == 0) {
+ return FalseResult;
+ }
+ return TrueResult;
+ }
+ case Expr::CXXDefaultArgExprClass:
+ return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
+ case Expr::ChooseExprClass: {
+ return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(Ctx), Ctx);
+ }
+ }
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ ICEDiag d = CheckICE(this, Ctx);
+ if (d.Val != 0) {
+ if (Loc) *Loc = d.Loc;
+ return false;
+ }
+ EvalResult EvalResult;
+ if (!Evaluate(EvalResult, Ctx))
+ assert(0 && "ICE cannot be evaluated!");
+ assert(!EvalResult.HasSideEffects && "ICE with side effects!");
+ assert(EvalResult.Val.isInt() && "ICE that isn't integer!");
+ Result = EvalResult.Val.getInt();
+ return true;
+}
+
+/// isNullPointerConstant - C99 6.3.2.3p3 - Return true if this is either an
+/// integer constant expression with the value zero, or such an expression
+/// cast to void*.
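+/// For example, in C the expressions 0, '\0', and (void*)0 are all null
+/// pointer constants, as is the GNU __null extension.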
+bool Expr::isNullPointerConstant(ASTContext &Ctx) const {
+ // Strip off a cast to void*, if it exists. Except in C++.
+ if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
+ if (!Ctx.getLangOptions().CPlusPlus) {
+ // Check that it is a cast to void*.
+ if (const PointerType *PT = CE->getType()->getAsPointerType()) {
+ QualType Pointee = PT->getPointeeType();
+ if (Pointee.getCVRQualifiers() == 0 &&
+ Pointee->isVoidType() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ return CE->getSubExpr()->isNullPointerConstant(Ctx);
+ }
+ }
+ } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
+ // Ignore the ImplicitCastExpr type entirely.
+ return ICE->getSubExpr()->isNullPointerConstant(Ctx);
+ } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
+ // Accept ((void*)0) as a null pointer constant, as many other
+ // implementations do.
+ return PE->getSubExpr()->isNullPointerConstant(Ctx);
+ } else if (const CXXDefaultArgExpr *DefaultArg
+ = dyn_cast<CXXDefaultArgExpr>(this)) {
+ // See through default argument expressions
+ return DefaultArg->getExpr()->isNullPointerConstant(Ctx);
+ } else if (isa<GNUNullExpr>(this)) {
+ // The GNU __null extension is always a null pointer constant.
+ return true;
+ }
+
+ // C++0x nullptr_t is always a null pointer constant.
+ if (getType()->isNullPtrType())
+ return true;
+
+ // This expression must be an integer type.
+ if (!getType()->isIntegerType())
+ return false;
+
+ // If we have an integer constant expression, we need to *evaluate* it and
+ // test for the value 0.
+ llvm::APSInt Result;
+ return isIntegerConstantExpr(Result, Ctx) && Result == 0;
+}
+
+FieldDecl *Expr::getBitField() {
+ Expr *E = this->IgnoreParenCasts();
+
+ if (MemberExpr *MemRef = dyn_cast<MemberExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(MemRef->getMemberDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E))
+ if (BinOp->isAssignmentOp() && BinOp->getLHS())
+ return BinOp->getLHS()->getBitField();
+
+ return 0;
+}
+
+/// isArrow - Return true if the base expression is a pointer to a vector,
+/// false if the base expression is a vector.
+bool ExtVectorElementExpr::isArrow() const {
+ return getBase()->getType()->isPointerType();
+}
+
+unsigned ExtVectorElementExpr::getNumElements() const {
+ if (const VectorType *VT = getType()->getAsVectorType())
+ return VT->getNumElements();
+ return 1;
+}
+
+/// containsDuplicateElements - Return true if any element access is repeated.
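+/// For example, the swizzle "xyzx" repeats "x", while "xyzw", "hi", and
+/// "odd" do not contain duplicates.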
+bool ExtVectorElementExpr::containsDuplicateElements() const {
+ const char *compStr = Accessor->getName();
+ unsigned length = Accessor->getLength();
+
+ // Halving swizzles do not contain duplicate elements.
+ if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
+ !strcmp(compStr, "even") || !strcmp(compStr, "odd"))
+ return false;
+
+ // Advance past s-char prefix on hex swizzles.
+ if (*compStr == 's') {
+ compStr++;
+ length--;
+ }
+
+ for (unsigned i = 0; i != length-1; i++) {
+ const char *s = compStr+i;
+ for (const char c = *s++; *s; s++)
+ if (c == *s)
+ return true;
+ }
+ return false;
+}
+
+/// getEncodedElementAccess - We encode the fields as an llvm ConstantArray.
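+/// For a four-element base vector, "lo" selects elements {0, 1}, "hi"
+/// selects {2, 3}, "even" selects {0, 2}, "odd" selects {1, 3}, and a named
+/// swizzle such as "xy" selects {0, 1}.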
+void ExtVectorElementExpr::getEncodedElementAccess(
+ llvm::SmallVectorImpl<unsigned> &Elts) const {
+ const char *compStr = Accessor->getName();
+ if (*compStr == 's')
+ compStr++;
+
+ bool isHi = !strcmp(compStr, "hi");
+ bool isLo = !strcmp(compStr, "lo");
+ bool isEven = !strcmp(compStr, "even");
+ bool isOdd = !strcmp(compStr, "odd");
+
+ for (unsigned i = 0, e = getNumElements(); i != e; ++i) {
+ uint64_t Index;
+
+ if (isHi)
+ Index = e + i;
+ else if (isLo)
+ Index = i;
+ else if (isEven)
+ Index = 2 * i;
+ else if (isOdd)
+ Index = 2 * i + 1;
+ else
+ Index = ExtVectorType::getAccessorIdx(compStr[i]);
+
+ Elts.push_back(Index);
+ }
+}
+
+// constructor for instance messages.
+ObjCMessageExpr::ObjCMessageExpr(Expr *receiver, Selector selInfo,
+ QualType retType, ObjCMethodDecl *mproto,
+ SourceLocation LBrac, SourceLocation RBrac,
+ Expr **ArgExprs, unsigned nargs)
+ : Expr(ObjCMessageExprClass, retType), SelName(selInfo),
+ MethodProto(mproto) {
+ NumArgs = nargs;
+ SubExprs = new Stmt*[NumArgs+1];
+ SubExprs[RECEIVER] = receiver;
+ if (NumArgs) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ SubExprs[i+ARGS_START] = static_cast<Expr *>(ArgExprs[i]);
+ }
+ LBracloc = LBrac;
+ RBracloc = RBrac;
+}
+
+// constructor for class messages.
+// FIXME: clsName should be typed to ObjCInterfaceType
+ObjCMessageExpr::ObjCMessageExpr(IdentifierInfo *clsName, Selector selInfo,
+ QualType retType, ObjCMethodDecl *mproto,
+ SourceLocation LBrac, SourceLocation RBrac,
+ Expr **ArgExprs, unsigned nargs)
+ : Expr(ObjCMessageExprClass, retType), SelName(selInfo),
+ MethodProto(mproto) {
+ NumArgs = nargs;
+ SubExprs = new Stmt*[NumArgs+1];
+ SubExprs[RECEIVER] = (Expr*) ((uintptr_t) clsName | IsClsMethDeclUnknown);
+ if (NumArgs) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ SubExprs[i+ARGS_START] = static_cast<Expr *>(ArgExprs[i]);
+ }
+ LBracloc = LBrac;
+ RBracloc = RBrac;
+}
+
+// constructor for class messages.
+ObjCMessageExpr::ObjCMessageExpr(ObjCInterfaceDecl *cls, Selector selInfo,
+ QualType retType, ObjCMethodDecl *mproto,
+ SourceLocation LBrac, SourceLocation RBrac,
+ Expr **ArgExprs, unsigned nargs)
+: Expr(ObjCMessageExprClass, retType), SelName(selInfo),
+MethodProto(mproto) {
+ NumArgs = nargs;
+ SubExprs = new Stmt*[NumArgs+1];
+ SubExprs[RECEIVER] = (Expr*) ((uintptr_t) cls | IsClsMethDeclKnown);
+ if (NumArgs) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ SubExprs[i+ARGS_START] = static_cast<Expr *>(ArgExprs[i]);
+ }
+ LBracloc = LBrac;
+ RBracloc = RBrac;
+}
+
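+// The receiver slot is a tagged pointer: the low Flags bits distinguish an
+// instance receiver expression, a class known only by name (IdentifierInfo),
+// and a known class declaration (ObjCInterfaceDecl); see the constructors
+// above.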
+ObjCMessageExpr::ClassInfo ObjCMessageExpr::getClassInfo() const {
+ uintptr_t x = (uintptr_t) SubExprs[RECEIVER];
+ switch (x & Flags) {
+ default:
+ assert(false && "Invalid ObjCMessageExpr.");
+ case IsInstMeth:
+ return ClassInfo(0, 0);
+ case IsClsMethDeclUnknown:
+ return ClassInfo(0, (IdentifierInfo*) (x & ~Flags));
+ case IsClsMethDeclKnown: {
+ ObjCInterfaceDecl* D = (ObjCInterfaceDecl*) (x & ~Flags);
+ return ClassInfo(D, D->getIdentifier());
+ }
+ }
+}
+
+void ObjCMessageExpr::setClassInfo(const ObjCMessageExpr::ClassInfo &CI) {
+ if (CI.first == 0 && CI.second == 0)
+ SubExprs[RECEIVER] = (Expr*)((uintptr_t)0 | IsInstMeth);
+ else if (CI.first == 0)
+ SubExprs[RECEIVER] = (Expr*)((uintptr_t)CI.second | IsClsMethDeclUnknown);
+ else
+ SubExprs[RECEIVER] = (Expr*)((uintptr_t)CI.first | IsClsMethDeclKnown);
+}
+
+
+bool ChooseExpr::isConditionTrue(ASTContext &C) const {
+ return getCond()->EvaluateAsInt(C) != 0;
+}
+
+void ShuffleVectorExpr::setExprs(Expr ** Exprs, unsigned NumExprs) {
+ if (NumExprs)
+ delete [] SubExprs;
+
+ SubExprs = new Stmt* [NumExprs];
+ this->NumExprs = NumExprs;
+ memcpy(SubExprs, Exprs, sizeof(Expr *) * NumExprs);
+}
+
+void SizeOfAlignOfExpr::Destroy(ASTContext& C) {
+ // Override default behavior of traversing children. If this has a type
+ // operand and the type is a variable-length array, the child iteration
+ // will iterate over the size expression. However, this expression belongs
+ // to the type, not to this, so we don't want to delete it.
+ // We still want to delete this expression.
+ if (isArgumentType()) {
+ this->~SizeOfAlignOfExpr();
+ C.Deallocate(this);
+ }
+ else
+ Expr::Destroy(C);
+}
+
+//===----------------------------------------------------------------------===//
+// DesignatedInitExpr
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ if (Field.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
+ else
+ return getField()->getIdentifier();
+}
+
+DesignatedInitExpr::DesignatedInitExpr(QualType Ty, unsigned NumDesignators,
+ const Designator *Designators,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ Expr **IndexExprs,
+ unsigned NumIndexExprs,
+ Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty,
+ Init->isTypeDependent(), Init->isValueDependent()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
+ this->Designators = new Designator[NumDesignators];
+
+ // Record the initializer itself.
+ child_iterator Child = child_begin();
+ *Child++ = Init;
+
+ // Copy the designators and their subexpressions, computing
+ // value-dependence along the way.
+ unsigned IndexIdx = 0;
+ for (unsigned I = 0; I != NumDesignators; ++I) {
+ this->Designators[I] = Designators[I];
+
+ if (this->Designators[I].isArrayDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Index = IndexExprs[IndexIdx];
+ ValueDependent = ValueDependent ||
+ Index->isTypeDependent() || Index->isValueDependent();
+
+ // Copy the index expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ } else if (this->Designators[I].isArrayRangeDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Start = IndexExprs[IndexIdx];
+ Expr *End = IndexExprs[IndexIdx + 1];
+ ValueDependent = ValueDependent ||
+ Start->isTypeDependent() || Start->isValueDependent() ||
+ End->isTypeDependent() || End->isValueDependent();
+
+ // Copy the start/end expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ *Child++ = IndexExprs[IndexIdx++];
+ }
+ }
+
+ assert(IndexIdx == NumIndexExprs && "Wrong number of index expressions");
+}
+
+DesignatedInitExpr *
+DesignatedInitExpr::Create(ASTContext &C, Designator *Designators,
+ unsigned NumDesignators,
+ Expr **IndexExprs, unsigned NumIndexExprs,
+ SourceLocation ColonOrEqualLoc,
+ bool UsesColonSyntax, Expr *Init) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(C.VoidTy, NumDesignators, Designators,
+ ColonOrEqualLoc, UsesColonSyntax,
+ IndexExprs, NumIndexExprs, Init);
+}
+
+DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(ASTContext &C,
+ unsigned NumIndexExprs) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
+}
+
+void DesignatedInitExpr::setDesignators(const Designator *Desigs,
+ unsigned NumDesigs) {
+ if (Designators)
+ delete [] Designators;
+
+ Designators = new Designator[NumDesigs];
+ NumDesignators = NumDesigs;
+ for (unsigned I = 0; I != NumDesigs; ++I)
+ Designators[I] = Desigs[I];
+}
+
+SourceRange DesignatedInitExpr::getSourceRange() const {
+ SourceLocation StartLoc;
+ Designator &First =
+ *const_cast<DesignatedInitExpr*>(this)->designators_begin();
+ if (First.isFieldDesignator()) {
+ if (GNUSyntax)
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
+ else
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
+ } else
+ StartLoc =
+ SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
+ return SourceRange(StartLoc, getInit()->getSourceRange().getEnd());
+}
+
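+// The designator index/range subexpressions live in a trailing array
+// allocated immediately after the DesignatedInitExpr object (see Create).
+// Slot 0 of that array holds the initializer, so designator subexpressions
+// start at offset 1.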
+Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) {
+ assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeStart(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
+}
+
+/// \brief Replaces the designator at index @p Idx with the series
+/// of designators in [First, Last).
+void DesignatedInitExpr::ExpandDesignator(unsigned Idx,
+ const Designator *First,
+ const Designator *Last) {
+ unsigned NumNewDesignators = Last - First;
+ if (NumNewDesignators == 0) {
+ // Expanding to an empty range removes the designator at Idx: shift the
+ // later designators down over it and shrink the count.
+ std::copy(Designators + Idx + 1, Designators + NumDesignators,
+ Designators + Idx);
+ --NumDesignators;
+ return;
+ } else if (NumNewDesignators == 1) {
+ Designators[Idx] = *First;
+ return;
+ }
+
+ Designator *NewDesignators
+ = new Designator[NumDesignators - 1 + NumNewDesignators];
+ std::copy(Designators, Designators + Idx, NewDesignators);
+ std::copy(First, Last, NewDesignators + Idx);
+ std::copy(Designators + Idx + 1, Designators + NumDesignators,
+ NewDesignators + Idx + NumNewDesignators);
+ delete [] Designators;
+ Designators = NewDesignators;
+ NumDesignators = NumDesignators - 1 + NumNewDesignators;
+}
+
+void DesignatedInitExpr::Destroy(ASTContext &C) {
+ delete [] Designators;
+ Expr::Destroy(C);
+}
+
+ImplicitValueInitExpr *ImplicitValueInitExpr::Clone(ASTContext &C) const {
+ return new (C) ImplicitValueInitExpr(getType());
+}
+
+//===----------------------------------------------------------------------===//
+// ExprIterator.
+//===----------------------------------------------------------------------===//
+
+Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); }
+Expr* ExprIterator::operator*() const { return cast<Expr>(*I); }
+Expr* ExprIterator::operator->() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator[](size_t idx) const {
+ return cast<Expr>(I[idx]);
+}
+const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// DeclRefExpr
+Stmt::child_iterator DeclRefExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator DeclRefExpr::child_end() { return child_iterator(); }
+
+// ObjCIvarRefExpr
+Stmt::child_iterator ObjCIvarRefExpr::child_begin() { return &Base; }
+Stmt::child_iterator ObjCIvarRefExpr::child_end() { return &Base+1; }
+
+// ObjCPropertyRefExpr
+Stmt::child_iterator ObjCPropertyRefExpr::child_begin() { return &Base; }
+Stmt::child_iterator ObjCPropertyRefExpr::child_end() { return &Base+1; }
+
+// ObjCKVCRefExpr
+Stmt::child_iterator ObjCKVCRefExpr::child_begin() { return &Base; }
+Stmt::child_iterator ObjCKVCRefExpr::child_end() { return &Base+1; }
+
+// ObjCSuperExpr
+Stmt::child_iterator ObjCSuperExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator ObjCSuperExpr::child_end() { return child_iterator(); }
+
+// PredefinedExpr
+Stmt::child_iterator PredefinedExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator PredefinedExpr::child_end() { return child_iterator(); }
+
+// IntegerLiteral
+Stmt::child_iterator IntegerLiteral::child_begin() { return child_iterator(); }
+Stmt::child_iterator IntegerLiteral::child_end() { return child_iterator(); }
+
+// CharacterLiteral
+Stmt::child_iterator CharacterLiteral::child_begin() { return child_iterator();}
+Stmt::child_iterator CharacterLiteral::child_end() { return child_iterator(); }
+
+// FloatingLiteral
+Stmt::child_iterator FloatingLiteral::child_begin() { return child_iterator(); }
+Stmt::child_iterator FloatingLiteral::child_end() { return child_iterator(); }
+
+// ImaginaryLiteral
+Stmt::child_iterator ImaginaryLiteral::child_begin() { return &Val; }
+Stmt::child_iterator ImaginaryLiteral::child_end() { return &Val+1; }
+
+// StringLiteral
+Stmt::child_iterator StringLiteral::child_begin() { return child_iterator(); }
+Stmt::child_iterator StringLiteral::child_end() { return child_iterator(); }
+
+// ParenExpr
+Stmt::child_iterator ParenExpr::child_begin() { return &Val; }
+Stmt::child_iterator ParenExpr::child_end() { return &Val+1; }
+
+// UnaryOperator
+Stmt::child_iterator UnaryOperator::child_begin() { return &Val; }
+Stmt::child_iterator UnaryOperator::child_end() { return &Val+1; }
+
+// SizeOfAlignOfExpr
+Stmt::child_iterator SizeOfAlignOfExpr::child_begin() {
+ // If this is of a type and the type is a VLA type (and not a typedef), the
+ // size expression of the VLA needs to be treated as an executable expression.
+ // Why isn't this weirdness documented better in StmtIterator?
+ if (isArgumentType()) {
+ if (VariableArrayType* T = dyn_cast<VariableArrayType>(
+ getArgumentType().getTypePtr()))
+ return child_iterator(T);
+ return child_iterator();
+ }
+ return child_iterator(&Argument.Ex);
+}
+Stmt::child_iterator SizeOfAlignOfExpr::child_end() {
+ if (isArgumentType())
+ return child_iterator();
+ return child_iterator(&Argument.Ex + 1);
+}
+
+// ArraySubscriptExpr
+Stmt::child_iterator ArraySubscriptExpr::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator ArraySubscriptExpr::child_end() {
+ return &SubExprs[0]+END_EXPR;
+}
+
+// CallExpr
+Stmt::child_iterator CallExpr::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator CallExpr::child_end() {
+ return &SubExprs[0]+NumArgs+ARGS_START;
+}
+
+// MemberExpr
+Stmt::child_iterator MemberExpr::child_begin() { return &Base; }
+Stmt::child_iterator MemberExpr::child_end() { return &Base+1; }
+
+// ExtVectorElementExpr
+Stmt::child_iterator ExtVectorElementExpr::child_begin() { return &Base; }
+Stmt::child_iterator ExtVectorElementExpr::child_end() { return &Base+1; }
+
+// CompoundLiteralExpr
+Stmt::child_iterator CompoundLiteralExpr::child_begin() { return &Init; }
+Stmt::child_iterator CompoundLiteralExpr::child_end() { return &Init+1; }
+
+// CastExpr
+Stmt::child_iterator CastExpr::child_begin() { return &Op; }
+Stmt::child_iterator CastExpr::child_end() { return &Op+1; }
+
+// BinaryOperator
+Stmt::child_iterator BinaryOperator::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator BinaryOperator::child_end() {
+ return &SubExprs[0]+END_EXPR;
+}
+
+// ConditionalOperator
+Stmt::child_iterator ConditionalOperator::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator ConditionalOperator::child_end() {
+ return &SubExprs[0]+END_EXPR;
+}
+
+// AddrLabelExpr
+Stmt::child_iterator AddrLabelExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator AddrLabelExpr::child_end() { return child_iterator(); }
+
+// StmtExpr
+Stmt::child_iterator StmtExpr::child_begin() { return &SubStmt; }
+Stmt::child_iterator StmtExpr::child_end() { return &SubStmt+1; }
+
+// TypesCompatibleExpr
+Stmt::child_iterator TypesCompatibleExpr::child_begin() {
+ return child_iterator();
+}
+
+Stmt::child_iterator TypesCompatibleExpr::child_end() {
+ return child_iterator();
+}
+
+// ChooseExpr
+Stmt::child_iterator ChooseExpr::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator ChooseExpr::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// GNUNullExpr
+Stmt::child_iterator GNUNullExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator GNUNullExpr::child_end() { return child_iterator(); }
+
+// ShuffleVectorExpr
+Stmt::child_iterator ShuffleVectorExpr::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator ShuffleVectorExpr::child_end() {
+ return &SubExprs[0]+NumExprs;
+}
+
+// VAArgExpr
+Stmt::child_iterator VAArgExpr::child_begin() { return &Val; }
+Stmt::child_iterator VAArgExpr::child_end() { return &Val+1; }
+
+// InitListExpr
+Stmt::child_iterator InitListExpr::child_begin() {
+ return InitExprs.size() ? &InitExprs[0] : 0;
+}
+Stmt::child_iterator InitListExpr::child_end() {
+ return InitExprs.size() ? &InitExprs[0] + InitExprs.size() : 0;
+}
+
+// DesignatedInitExpr
+Stmt::child_iterator DesignatedInitExpr::child_begin() {
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ return reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+}
+Stmt::child_iterator DesignatedInitExpr::child_end() {
+ return child_iterator(&*child_begin() + NumSubExprs);
+}
+
+// ImplicitValueInitExpr
+Stmt::child_iterator ImplicitValueInitExpr::child_begin() {
+ return child_iterator();
+}
+
+Stmt::child_iterator ImplicitValueInitExpr::child_end() {
+ return child_iterator();
+}
+
+// ObjCStringLiteral
+Stmt::child_iterator ObjCStringLiteral::child_begin() {
+ return &String;
+}
+Stmt::child_iterator ObjCStringLiteral::child_end() {
+ return &String+1;
+}
+
+// ObjCEncodeExpr
+Stmt::child_iterator ObjCEncodeExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator ObjCEncodeExpr::child_end() { return child_iterator(); }
+
+// ObjCSelectorExpr
+Stmt::child_iterator ObjCSelectorExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator ObjCSelectorExpr::child_end() {
+ return child_iterator();
+}
+
+// ObjCProtocolExpr
+Stmt::child_iterator ObjCProtocolExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator ObjCProtocolExpr::child_end() {
+ return child_iterator();
+}
+
+// ObjCMessageExpr
+Stmt::child_iterator ObjCMessageExpr::child_begin() {
+ return getReceiver() ? &SubExprs[0] : &SubExprs[0] + ARGS_START;
+}
+Stmt::child_iterator ObjCMessageExpr::child_end() {
+ return &SubExprs[0]+ARGS_START+getNumArgs();
+}
+
+// Blocks
+Stmt::child_iterator BlockExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator BlockExpr::child_end() { return child_iterator(); }
+
+Stmt::child_iterator BlockDeclRefExpr::child_begin() { return child_iterator();}
+Stmt::child_iterator BlockDeclRefExpr::child_end() { return child_iterator(); }
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
new file mode 100644
index 0000000..4a15245
--- /dev/null
+++ b/lib/AST/ExprCXX.cpp
@@ -0,0 +1,424 @@
+//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the Expr class declared in ExprCXX.h
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+using namespace clang;
+
+void CXXConditionDeclExpr::Destroy(ASTContext& C) {
+ // FIXME: Cannot destroy the decl here, because it is linked into the
+ // DeclContext's chain.
+ //getVarDecl()->Destroy(C);
+ this->~CXXConditionDeclExpr();
+ C.Deallocate(this);
+}
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// CXXTypeidExpr - has child iterators if the operand is an expression
+Stmt::child_iterator CXXTypeidExpr::child_begin() {
+ return isTypeOperand() ? child_iterator() : &Operand.Ex;
+}
+Stmt::child_iterator CXXTypeidExpr::child_end() {
+ return isTypeOperand() ? child_iterator() : &Operand.Ex+1;
+}
+
+// CXXBoolLiteralExpr
+Stmt::child_iterator CXXBoolLiteralExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator CXXBoolLiteralExpr::child_end() {
+ return child_iterator();
+}
+
+// CXXNullPtrLiteralExpr
+Stmt::child_iterator CXXNullPtrLiteralExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator CXXNullPtrLiteralExpr::child_end() {
+ return child_iterator();
+}
+
+// CXXThisExpr
+Stmt::child_iterator CXXThisExpr::child_begin() { return child_iterator(); }
+Stmt::child_iterator CXXThisExpr::child_end() { return child_iterator(); }
+
+// CXXThrowExpr
+Stmt::child_iterator CXXThrowExpr::child_begin() { return &Op; }
+Stmt::child_iterator CXXThrowExpr::child_end() {
+ // If Op is 0, we are processing "throw;", which has no children.
+ return Op ? &Op+1 : &Op;
+}
+
+// CXXDefaultArgExpr
+Stmt::child_iterator CXXDefaultArgExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator CXXDefaultArgExpr::child_end() {
+ return child_iterator();
+}
+
+// CXXZeroInitValueExpr
+Stmt::child_iterator CXXZeroInitValueExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator CXXZeroInitValueExpr::child_end() {
+ return child_iterator();
+}
+
+// CXXConditionDeclExpr
+Stmt::child_iterator CXXConditionDeclExpr::child_begin() {
+ return getVarDecl();
+}
+Stmt::child_iterator CXXConditionDeclExpr::child_end() {
+ return child_iterator();
+}
+
+// CXXNewExpr
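+// Sub-expressions are laid out in a single array: the optional array size
+// expression first, then the placement arguments, then the constructor
+// arguments.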
+CXXNewExpr::CXXNewExpr(bool globalNew, FunctionDecl *operatorNew,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ bool parenTypeId, Expr *arraySize,
+ CXXConstructorDecl *constructor, bool initializer,
+ Expr **constructorArgs, unsigned numConsArgs,
+ FunctionDecl *operatorDelete, QualType ty,
+ SourceLocation startLoc, SourceLocation endLoc)
+ : Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()),
+ GlobalNew(globalNew), ParenTypeId(parenTypeId),
+ Initializer(initializer), Array(arraySize), NumPlacementArgs(numPlaceArgs),
+ NumConstructorArgs(numConsArgs), OperatorNew(operatorNew),
+ OperatorDelete(operatorDelete), Constructor(constructor),
+ StartLoc(startLoc), EndLoc(endLoc)
+{
+ unsigned TotalSize = Array + NumPlacementArgs + NumConstructorArgs;
+ SubExprs = new Stmt*[TotalSize];
+ unsigned i = 0;
+ if (Array)
+ SubExprs[i++] = arraySize;
+ for (unsigned j = 0; j < NumPlacementArgs; ++j)
+ SubExprs[i++] = placementArgs[j];
+ for (unsigned j = 0; j < NumConstructorArgs; ++j)
+ SubExprs[i++] = constructorArgs[j];
+ assert(i == TotalSize);
+}
+
+Stmt::child_iterator CXXNewExpr::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator CXXNewExpr::child_end() {
+ return &SubExprs[0] + Array + getNumPlacementArgs() + getNumConstructorArgs();
+}
+
+// CXXDeleteExpr
+Stmt::child_iterator CXXDeleteExpr::child_begin() { return &Argument; }
+Stmt::child_iterator CXXDeleteExpr::child_end() { return &Argument+1; }
+
+// UnresolvedFunctionNameExpr
+Stmt::child_iterator UnresolvedFunctionNameExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator UnresolvedFunctionNameExpr::child_end() {
+ return child_iterator();
+}
+
+UnresolvedFunctionNameExpr*
+UnresolvedFunctionNameExpr::Clone(ASTContext &C) const {
+ return new (C) UnresolvedFunctionNameExpr(Name, getType(), Loc);
+}
+
+// UnaryTypeTraitExpr
+Stmt::child_iterator UnaryTypeTraitExpr::child_begin() {
+ return child_iterator();
+}
+Stmt::child_iterator UnaryTypeTraitExpr::child_end() {
+ return child_iterator();
+}
+
+// UnresolvedDeclRefExpr
+StmtIterator UnresolvedDeclRefExpr::child_begin() {
+ return child_iterator();
+}
+
+StmtIterator UnresolvedDeclRefExpr::child_end() {
+ return child_iterator();
+}
+
+bool UnaryTypeTraitExpr::EvaluateTrait() const {
+ switch(UTT) {
+ default: assert(false && "Unknown type trait or not implemented");
+ case UTT_IsPOD: return QueriedType->isPODType();
+ case UTT_IsClass: // Fallthrough
+ case UTT_IsUnion:
+ if (const RecordType *Record = QueriedType->getAsRecordType()) {
+ bool Union = Record->getDecl()->isUnion();
+ return UTT == UTT_IsUnion ? Union : !Union;
+ }
+ return false;
+ case UTT_IsEnum: return QueriedType->isEnumeralType();
+ case UTT_IsPolymorphic:
+ if (const RecordType *Record = QueriedType->getAsRecordType()) {
+ // Type traits are only parsed in C++, so we've got CXXRecords.
+ return cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic();
+ }
+ return false;
+ case UTT_IsAbstract:
+ if (const RecordType *RT = QueriedType->getAsRecordType())
+ return cast<CXXRecordDecl>(RT->getDecl())->isAbstract();
+ return false;
+ case UTT_HasTrivialConstructor:
+ if (const RecordType *RT = QueriedType->getAsRecordType())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialConstructor();
+ return false;
+ case UTT_HasTrivialDestructor:
+ if (const RecordType *RT = QueriedType->getAsRecordType())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor();
+ return false;
+ }
+}
+
+SourceRange CXXOperatorCallExpr::getSourceRange() const {
+ OverloadedOperatorKind Kind = getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (getNumArgs() == 1)
+ // Prefix operator
+ return SourceRange(getOperatorLoc(),
+ getArg(0)->getSourceRange().getEnd());
+ else
+ // Postfix operator
+ return SourceRange(getArg(0)->getSourceRange().getEnd(),
+ getOperatorLoc());
+ } else if (Kind == OO_Call || Kind == OO_Subscript) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ } else if (getNumArgs() == 1) {
+ return SourceRange(getOperatorLoc(), getArg(0)->getSourceRange().getEnd());
+ } else if (getNumArgs() == 2) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(),
+ getArg(1)->getSourceRange().getEnd());
+ } else {
+ return SourceRange();
+ }
+}
+
+Expr *CXXMemberCallExpr::getImplicitObjectArgument() {
+ if (MemberExpr *MemExpr = dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return MemExpr->getBase();
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Named casts
+//===----------------------------------------------------------------------===//
+
+/// getCastName - Get the name of the C++ cast being used, e.g.,
+/// "static_cast", "dynamic_cast", "reinterpret_cast", or
+/// "const_cast". The returned pointer must not be freed.
+const char *CXXNamedCastExpr::getCastName() const {
+ switch (getStmtClass()) {
+ case CXXStaticCastExprClass: return "static_cast";
+ case CXXDynamicCastExprClass: return "dynamic_cast";
+ case CXXReinterpretCastExprClass: return "reinterpret_cast";
+ case CXXConstCastExprClass: return "const_cast";
+ default: return "<invalid cast>";
+ }
+}
+
+CXXTemporary *CXXTemporary::Create(ASTContext &C,
+ const CXXDestructorDecl *Destructor) {
+ return new (C) CXXTemporary(Destructor);
+}
+
+void CXXTemporary::Destroy(ASTContext &C) {
+ this->~CXXTemporary();
+ C.Deallocate(this);
+}
+
+CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
+ CXXTemporary *Temp,
+ Expr* SubExpr) {
+ assert(SubExpr->getType()->isRecordType() &&
+ "Expression bound to a temporary must have record type!");
+
+ return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
+}
+
+void CXXBindTemporaryExpr::Destroy(ASTContext &C) {
+ Temp->Destroy(C);
+ this->~CXXBindTemporaryExpr();
+ C.Deallocate(this);
+}
+
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
+ CXXConstructorDecl *Cons,
+ QualType writtenTy,
+ SourceLocation tyBeginLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation rParenLoc)
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass, writtenTy, Cons,
+ false, Args, NumArgs),
+ TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {
+}
+
+CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
+ CXXConstructorDecl *D, bool Elidable,
+ Expr **Args, unsigned NumArgs) {
+ return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, D, Elidable,
+ Args, NumArgs);
+}
+
+CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
+ CXXConstructorDecl *D, bool elidable,
+ Expr **args, unsigned numargs)
+: Expr(SC, T,
+ T->isDependentType(),
+ (T->isDependentType() ||
+ CallExpr::hasAnyValueDependentArguments(args, numargs))),
+ Constructor(D), Elidable(elidable), Args(0), NumArgs(numargs) {
+ if (NumArgs > 0) {
+ Args = new (C) Stmt*[NumArgs];
+ for (unsigned i = 0; i < NumArgs; ++i)
+ Args[i] = args[i];
+ }
+}
+
+void CXXConstructExpr::Destroy(ASTContext &C) {
+ DestroyChildren(C);
+ if (Args)
+ C.Deallocate(Args);
+ this->~CXXConstructExpr();
+ C.Deallocate(this);
+}
+
+CXXExprWithTemporaries::CXXExprWithTemporaries(Expr *subexpr,
+ CXXTemporary **temps,
+ unsigned numtemps)
+: Expr(CXXExprWithTemporariesClass, subexpr->getType(),
+ subexpr->isTypeDependent(), subexpr->isValueDependent()),
+ SubExpr(subexpr), Temps(0), NumTemps(numtemps) {
+ if (NumTemps > 0) {
+ Temps = new CXXTemporary*[NumTemps];
+ for (unsigned i = 0; i < NumTemps; ++i)
+ Temps[i] = temps[i];
+ }
+}
+
+CXXExprWithTemporaries *CXXExprWithTemporaries::Create(ASTContext &C,
+ Expr *SubExpr,
+ CXXTemporary **Temps,
+ unsigned NumTemps) {
+ return new (C) CXXExprWithTemporaries(SubExpr, Temps, NumTemps);
+}
+
+void CXXExprWithTemporaries::Destroy(ASTContext &C) {
+ DestroyChildren(C);
+ this->~CXXExprWithTemporaries();
+ C.Deallocate(this);
+}
+
+CXXExprWithTemporaries::~CXXExprWithTemporaries() {
+ delete[] Temps;
+}
+
+// CXXBindTemporaryExpr
+Stmt::child_iterator CXXBindTemporaryExpr::child_begin() {
+ return &SubExpr;
+}
+
+Stmt::child_iterator CXXBindTemporaryExpr::child_end() {
+ return &SubExpr + 1;
+}
+
+// CXXConstructExpr
+Stmt::child_iterator CXXConstructExpr::child_begin() {
+ return &Args[0];
+}
+Stmt::child_iterator CXXConstructExpr::child_end() {
+ return &Args[0]+NumArgs;
+}
+
+// CXXExprWithTemporaries
+Stmt::child_iterator CXXExprWithTemporaries::child_begin() {
+ return &SubExpr;
+}
+
+Stmt::child_iterator CXXExprWithTemporaries::child_end() {
+ return &SubExpr + 1;
+}
+
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(
+ SourceLocation TyBeginLoc,
+ QualType T,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc)
+ : Expr(CXXUnresolvedConstructExprClass, T.getNonReferenceType(),
+ T->isDependentType(), true),
+ TyBeginLoc(TyBeginLoc),
+ Type(T),
+ LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc),
+ NumArgs(NumArgs) {
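+ // The argument expressions are stored in memory immediately after the
+ // CXXUnresolvedConstructExpr object itself.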
+ Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
+ memcpy(StoredArgs, Args, sizeof(Expr *) * NumArgs);
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::Create(ASTContext &C,
+ SourceLocation TyBegin,
+ QualType T,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(TyBegin, T, LParenLoc,
+ Args, NumArgs, RParenLoc);
+}
+
+Stmt::child_iterator CXXUnresolvedConstructExpr::child_begin() {
+ return child_iterator(reinterpret_cast<Stmt **>(this + 1));
+}
+
+Stmt::child_iterator CXXUnresolvedConstructExpr::child_end() {
+ return child_iterator(reinterpret_cast<Stmt **>(this + 1) + NumArgs);
+}
+
+Stmt::child_iterator CXXUnresolvedMemberExpr::child_begin() {
+ return child_iterator(&Base);
+}
+
+Stmt::child_iterator CXXUnresolvedMemberExpr::child_end() {
+ return child_iterator(&Base + 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Cloners
+//===----------------------------------------------------------------------===//
+
+CXXBoolLiteralExpr* CXXBoolLiteralExpr::Clone(ASTContext &C) const {
+ return new (C) CXXBoolLiteralExpr(Value, getType(), Loc);
+}
+
+CXXNullPtrLiteralExpr* CXXNullPtrLiteralExpr::Clone(ASTContext &C) const {
+ return new (C) CXXNullPtrLiteralExpr(getType(), Loc);
+}
+
+CXXZeroInitValueExpr* CXXZeroInitValueExpr::Clone(ASTContext &C) const {
+ return new (C) CXXZeroInitValueExpr(getType(), TyBeginLoc, RParenLoc);
+}
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
new file mode 100644
index 0000000..50fdcfd
--- /dev/null
+++ b/lib/AST/ExprConstant.cpp
@@ -0,0 +1,1723 @@
+//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr constant evaluator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
+#include <cstring>
+
+using namespace clang;
+using llvm::APSInt;
+using llvm::APFloat;
+
+/// EvalInfo - This is a private struct used by the evaluator to capture
+/// information about a subexpression as it is folded. It retains information
+/// about the AST context, but also maintains information about the folded
+/// expression.
+///
+/// If an expression could be evaluated, it is still possible it is not a C
+/// "integer constant expression" or constant expression. If not, this struct
+/// captures information about how and why not.
+///
+/// One bit of information passed *into* the request for constant folding
+/// indicates whether the subexpression is "evaluated" or not according to C
+/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+/// evaluate the expression regardless of what the RHS is, but C only allows
+/// certain things in certain situations.
+struct EvalInfo {
+ ASTContext &Ctx;
+
+ /// EvalResult - Contains information about the evaluation.
+ Expr::EvalResult &EvalResult;
+
+ EvalInfo(ASTContext &ctx, Expr::EvalResult& evalresult) : Ctx(ctx),
+ EvalResult(evalresult) {}
+};
+
+
+static bool EvaluateLValue(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, APValue &Result, EvalInfo &Info);
+
+//===----------------------------------------------------------------------===//
+// Misc utilities
+//===----------------------------------------------------------------------===//
+
+static bool HandleConversionToBool(Expr* E, bool& Result, EvalInfo &Info) {
+ if (E->getType()->isIntegralType()) {
+ APSInt IntResult;
+ if (!EvaluateInteger(E, IntResult, Info))
+ return false;
+ Result = IntResult != 0;
+ return true;
+ } else if (E->getType()->isRealFloatingType()) {
+ APFloat FloatResult(0.0);
+ if (!EvaluateFloat(E, FloatResult, Info))
+ return false;
+ Result = !FloatResult.isZero();
+ return true;
+ } else if (E->getType()->hasPointerRepresentation()) {
+ APValue PointerResult;
+ if (!EvaluatePointer(E, PointerResult, Info))
+ return false;
+ // FIXME: Is this accurate for all kinds of bases? If not, what would
+ // the check look like?
+ Result = PointerResult.getLValueBase() || PointerResult.getLValueOffset();
+ return true;
+ } else if (E->getType()->isAnyComplexType()) {
+ APValue ComplexResult;
+ if (!EvaluateComplex(E, ComplexResult, Info))
+ return false;
+ if (ComplexResult.isComplexFloat()) {
+ Result = !ComplexResult.getComplexFloatReal().isZero() ||
+ !ComplexResult.getComplexFloatImag().isZero();
+ } else {
+ Result = ComplexResult.getComplexIntReal().getBoolValue() ||
+ ComplexResult.getComplexIntImag().getBoolValue();
+ }
+ return true;
+ }
+
+ return false;
+}
+
+static APSInt HandleFloatToIntCast(QualType DestType, QualType SrcType,
+ APFloat &Value, ASTContext &Ctx) {
+ unsigned DestWidth = Ctx.getIntWidth(DestType);
+ // Determine whether we are converting to unsigned or signed.
+ bool DestSigned = DestType->isSignedIntegerType();
+
+ // FIXME: Warning for overflow.
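+ // Space provides 4 * 64 = 256 bits of storage for the converted value,
+ // which should be enough for any integer width handled here.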
+ uint64_t Space[4];
+ bool ignored;
+ (void)Value.convertToInteger(Space, DestWidth, DestSigned,
+ llvm::APFloat::rmTowardZero, &ignored);
+ return APSInt(llvm::APInt(DestWidth, 4, Space), !DestSigned);
+}
+
+static APFloat HandleFloatToFloatCast(QualType DestType, QualType SrcType,
+ APFloat &Value, ASTContext &Ctx) {
+ bool ignored;
+ APFloat Result = Value;
+ Result.convert(Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored);
+ return Result;
+}
+
+static APSInt HandleIntToIntCast(QualType DestType, QualType SrcType,
+ APSInt &Value, ASTContext &Ctx) {
+ unsigned DestWidth = Ctx.getIntWidth(DestType);
+ APSInt Result = Value;
+ // Figure out if this is a truncate, extend or noop cast.
+ // If the input is signed, do a sign extend, noop, or truncate.
+ Result.extOrTrunc(DestWidth);
+ Result.setIsUnsigned(DestType->isUnsignedIntegerType());
+ return Result;
+}
+
+static APFloat HandleIntToFloatCast(QualType DestType, QualType SrcType,
+ APSInt &Value, ASTContext &Ctx) {
+
+ APFloat Result(Ctx.getFloatTypeSemantics(DestType), 1);
+ Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven);
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// LValue Evaluation
+//===----------------------------------------------------------------------===//
+namespace {
+class VISIBILITY_HIDDEN LValueExprEvaluator
+ : public StmtVisitor<LValueExprEvaluator, APValue> {
+ EvalInfo &Info;
+public:
+
+ LValueExprEvaluator(EvalInfo &info) : Info(info) {}
+
+ APValue VisitStmt(Stmt *S) {
+ return APValue();
+ }
+
+ APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+ APValue VisitDeclRefExpr(DeclRefExpr *E);
+ APValue VisitBlockExpr(BlockExpr *E);
+ APValue VisitPredefinedExpr(PredefinedExpr *E) { return APValue(E, 0); }
+ APValue VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ APValue VisitMemberExpr(MemberExpr *E);
+ APValue VisitStringLiteral(StringLiteral *E) { return APValue(E, 0); }
+ APValue VisitObjCEncodeExpr(ObjCEncodeExpr *E) { return APValue(E, 0); }
+ APValue VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ APValue VisitUnaryDeref(UnaryOperator *E);
+ APValue VisitUnaryExtension(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ // FIXME: Missing: __real__, __imag__
+};
+} // end anonymous namespace
+
+static bool EvaluateLValue(const Expr* E, APValue& Result, EvalInfo &Info) {
+ Result = LValueExprEvaluator(Info).Visit(const_cast<Expr*>(E));
+ return Result.isLValue();
+}
+
+APValue LValueExprEvaluator::VisitDeclRefExpr(DeclRefExpr *E)
+{
+ if (!E->hasGlobalStorage())
+ return APValue();
+
+ if (isa<FunctionDecl>(E->getDecl())) {
+ return APValue(E, 0);
+ } else if (VarDecl* VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ return APValue(E, 0);
+ if (VD->getInit())
+ return Visit(VD->getInit());
+ }
+
+ return APValue();
+}
+
+APValue LValueExprEvaluator::VisitBlockExpr(BlockExpr *E)
+{
+ if (E->hasBlockDeclRefExprs())
+ return APValue();
+
+ return APValue(E, 0);
+}
+
+APValue LValueExprEvaluator::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (E->isFileScope())
+ return APValue(E, 0);
+ return APValue();
+}
+
+APValue LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
+ APValue result;
+ QualType Ty;
+ if (E->isArrow()) {
+ if (!EvaluatePointer(E->getBase(), result, Info))
+ return APValue();
+ Ty = E->getBase()->getType()->getAsPointerType()->getPointeeType();
+ } else {
+ result = Visit(E->getBase());
+ if (result.isUninit())
+ return APValue();
+ Ty = E->getBase()->getType();
+ }
+
+ RecordDecl *RD = Ty->getAsRecordType()->getDecl();
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+
+ FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
+ if (!FD) // FIXME: deal with other kinds of member expressions
+ return APValue();
+
+ if (FD->getType()->isReferenceType())
+ return APValue();
+
+ // FIXME: This is linear time.
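+ // Walk the record's fields to find the index of FD, then add its byte
+ // offset from the record layout to the lvalue offset below.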
+ unsigned i = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(Info.Ctx),
+ FieldEnd = RD->field_end(Info.Ctx);
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == FD)
+ break;
+ }
+
+ result.setLValue(result.getLValueBase(),
+ result.getLValueOffset() + RL.getFieldOffset(i) / 8);
+
+ return result;
+}
+
+APValue LValueExprEvaluator::VisitArraySubscriptExpr(ArraySubscriptExpr *E)
+{
+ APValue Result;
+
+ if (!EvaluatePointer(E->getBase(), Result, Info))
+ return APValue();
+
+ APSInt Index;
+ if (!EvaluateInteger(E->getIdx(), Index, Info))
+ return APValue();
+
+ uint64_t ElementSize = Info.Ctx.getTypeSize(E->getType()) / 8;
+
+ uint64_t Offset = Index.getSExtValue() * ElementSize;
+ Result.setLValue(Result.getLValueBase(),
+ Result.getLValueOffset() + Offset);
+ return Result;
+}
+
+APValue LValueExprEvaluator::VisitUnaryDeref(UnaryOperator *E)
+{
+ APValue Result;
+ if (!EvaluatePointer(E->getSubExpr(), Result, Info))
+ return APValue();
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN PointerExprEvaluator
+ : public StmtVisitor<PointerExprEvaluator, APValue> {
+ EvalInfo &Info;
+public:
+
+ PointerExprEvaluator(EvalInfo &info) : Info(info) {}
+
+ APValue VisitStmt(Stmt *S) {
+ return APValue();
+ }
+
+ APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+
+ APValue VisitBinaryOperator(const BinaryOperator *E);
+ APValue VisitCastExpr(const CastExpr* E);
+ APValue VisitUnaryExtension(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitUnaryAddrOf(const UnaryOperator *E);
+ APValue VisitObjCStringLiteral(ObjCStringLiteral *E)
+ { return APValue(E, 0); }
+ APValue VisitAddrLabelExpr(AddrLabelExpr *E)
+ { return APValue(E, 0); }
+ APValue VisitCallExpr(CallExpr *E);
+ APValue VisitBlockExpr(BlockExpr *E) {
+ if (!E->hasBlockDeclRefExprs())
+ return APValue(E, 0);
+ return APValue();
+ }
+ APValue VisitImplicitValueInitExpr(ImplicitValueInitExpr *E)
+ { return APValue((Expr*)0, 0); }
+ APValue VisitConditionalOperator(ConditionalOperator *E);
+ APValue VisitChooseExpr(ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ APValue VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E)
+ { return APValue((Expr*)0, 0); }
+ // FIXME: Missing: @protocol, @selector
+};
+} // end anonymous namespace
+
+static bool EvaluatePointer(const Expr* E, APValue& Result, EvalInfo &Info) {
+ if (!E->getType()->hasPointerRepresentation())
+ return false;
+ Result = PointerExprEvaluator(Info).Visit(const_cast<Expr*>(E));
+ return Result.isLValue();
+}
+
+APValue PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() != BinaryOperator::Add &&
+ E->getOpcode() != BinaryOperator::Sub)
+ return APValue();
+
+ const Expr *PExp = E->getLHS();
+ const Expr *IExp = E->getRHS();
+ if (IExp->getType()->isPointerType())
+ std::swap(PExp, IExp);
+
+ APValue ResultLValue;
+ if (!EvaluatePointer(PExp, ResultLValue, Info))
+ return APValue();
+
+ llvm::APSInt AdditionalOffset(32);
+ if (!EvaluateInteger(IExp, AdditionalOffset, Info))
+ return APValue();
+
+ QualType PointeeType = PExp->getType()->getAsPointerType()->getPointeeType();
+ uint64_t SizeOfPointee;
+
+ // Explicitly handle GNU void* and function pointer arithmetic extensions.
+ if (PointeeType->isVoidType() || PointeeType->isFunctionType())
+ SizeOfPointee = 1;
+ else
+ SizeOfPointee = Info.Ctx.getTypeSize(PointeeType) / 8;
+
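+ // For example, folding "p + 2" where p points to a 32-bit int scales the
+ // index by 4, adding 8 bytes to the lvalue offset.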
+ uint64_t Offset = ResultLValue.getLValueOffset();
+
+ if (E->getOpcode() == BinaryOperator::Add)
+ Offset += AdditionalOffset.getLimitedValue() * SizeOfPointee;
+ else
+ Offset -= AdditionalOffset.getLimitedValue() * SizeOfPointee;
+
+ return APValue(ResultLValue.getLValueBase(), Offset);
+}
+
+APValue PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ APValue result;
+ if (EvaluateLValue(E->getSubExpr(), result, Info))
+ return result;
+ return APValue();
+}
+
+
+APValue PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ // Check for pointer->pointer cast
+ if (SubExpr->getType()->isPointerType()) {
+ APValue Result;
+ if (EvaluatePointer(SubExpr, Result, Info))
+ return Result;
+ return APValue();
+ }
+
+ if (SubExpr->getType()->isIntegralType()) {
+ APValue Result;
+ if (!EvaluateIntegerOrLValue(SubExpr, Result, Info))
+ return APValue();
+
+ if (Result.isInt()) {
+ Result.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
+ return APValue(0, Result.getInt().getZExtValue());
+ }
+
+ // Cast is of an lvalue, no need to change value.
+ return Result;
+ }
+
+ if (SubExpr->getType()->isFunctionType() ||
+ SubExpr->getType()->isBlockPointerType() ||
+ SubExpr->getType()->isArrayType()) {
+ APValue Result;
+ if (EvaluateLValue(SubExpr, Result, Info))
+ return Result;
+ return APValue();
+ }
+
+ return APValue();
+}
+
+APValue PointerExprEvaluator::VisitCallExpr(CallExpr *E) {
+ if (E->isBuiltinCall(Info.Ctx) ==
+ Builtin::BI__builtin___CFStringMakeConstantString)
+ return APValue(E, 0);
+ return APValue();
+}
+
+APValue PointerExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
+ bool BoolResult;
+ if (!HandleConversionToBool(E->getCond(), BoolResult, Info))
+ return APValue();
+
+ Expr* EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+
+ APValue Result;
+ if (EvaluatePointer(EvalExpr, Result, Info))
+ return Result;
+ return APValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Vector Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN VectorExprEvaluator
+ : public StmtVisitor<VectorExprEvaluator, APValue> {
+ EvalInfo &Info;
+ APValue GetZeroVector(QualType VecType);
+ public:
+
+ VectorExprEvaluator(EvalInfo &info) : Info(info) {}
+
+ APValue VisitStmt(Stmt *S) {
+ return APValue();
+ }
+
+ APValue VisitParenExpr(ParenExpr *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitUnaryExtension(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitUnaryPlus(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitUnaryReal(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ APValue VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E)
+ { return GetZeroVector(E->getType()); }
+ APValue VisitCastExpr(const CastExpr* E);
+ APValue VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ APValue VisitInitListExpr(const InitListExpr *E);
+ APValue VisitConditionalOperator(const ConditionalOperator *E);
+ APValue VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ APValue VisitUnaryImag(const UnaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
+ // binary comparisons, binary and/or/xor,
+ // shufflevector, ExtVectorElementExpr
+ // (Note that these require implementing conversions
+ // between vector types.)
+ };
+} // end anonymous namespace
+
+static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
+ if (!E->getType()->isVectorType())
+ return false;
+ Result = VectorExprEvaluator(Info).Visit(const_cast<Expr*>(E));
+ return !Result.isUninit();
+}
+
+APValue VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const Expr* SE = E->getSubExpr();
+
+ // Check for vector->vector bitcast.
+ if (SE->getType()->isVectorType())
+ return this->Visit(const_cast<Expr*>(SE));
+
+ return APValue();
+}
+
+APValue
+VectorExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return this->Visit(const_cast<Expr*>(E->getInitializer()));
+}
+
+APValue
+VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const VectorType *VT = E->getType()->getAsVectorType();
+ unsigned NumInits = E->getNumInits();
+ unsigned NumElements = VT->getNumElements();
+
+ QualType EltTy = VT->getElementType();
+ llvm::SmallVector<APValue, 4> Elements;
+
+ for (unsigned i = 0; i < NumElements; i++) {
+ if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (i < NumInits) {
+ if (!EvaluateInteger(E->getInit(i), sInt, Info))
+ return APValue();
+ } else {
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ }
+ Elements.push_back(APValue(sInt));
+ } else {
+ llvm::APFloat f(0.0);
+ if (i < NumInits) {
+ if (!EvaluateFloat(E->getInit(i), f, Info))
+ return APValue();
+ } else {
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ }
+ Elements.push_back(APValue(f));
+ }
+ }
+ return APValue(&Elements[0], Elements.size());
+}
+
+APValue
+VectorExprEvaluator::GetZeroVector(QualType T) {
+ const VectorType *VT = T->getAsVectorType();
+ QualType EltTy = VT->getElementType();
+ APValue ZeroElement;
+ if (EltTy->isIntegerType())
+ ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
+ else
+ ZeroElement =
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
+
+ llvm::SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
+ return APValue(&Elements[0], Elements.size());
+}
+
+APValue VectorExprEvaluator::VisitConditionalOperator(const ConditionalOperator *E) {
+ bool BoolResult;
+ if (!HandleConversionToBool(E->getCond(), BoolResult, Info))
+ return APValue();
+
+ Expr* EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+
+ APValue Result;
+ if (EvaluateVector(EvalExpr, Result, Info))
+ return Result;
+ return APValue();
+}
+
+APValue VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (!E->getSubExpr()->isEvaluatable(Info.Ctx))
+ Info.EvalResult.HasSideEffects = true;
+ return GetZeroVector(E->getType());
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN IntExprEvaluator
+ : public StmtVisitor<IntExprEvaluator, bool> {
+ EvalInfo &Info;
+ APValue &Result;
+public:
+ IntExprEvaluator(EvalInfo &info, APValue &result)
+ : Info(info), Result(result) {}
+
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(SI.isSigned() == E->getType()->isSignedIntegerType() &&
+ "Invalid evaluation result.");
+ assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(SI);
+ return true;
+ }
+
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(APSInt(I));
+ Result.getInt().setIsUnsigned(E->getType()->isUnsignedIntegerType());
+ return true;
+ }
+
+ bool Success(uint64_t Value, const Expr *E) {
+ assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
+ return true;
+ }
+
+ bool Error(SourceLocation L, diag::kind D, const Expr *E) {
+ // Take the first error.
+ if (Info.EvalResult.Diag == 0) {
+ Info.EvalResult.DiagLoc = L;
+ Info.EvalResult.Diag = D;
+ Info.EvalResult.DiagExpr = E;
+ }
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitStmt(Stmt *) {
+ assert(0 && "This should be called on integers, stmts are not integers");
+ return false;
+ }
+
+ bool VisitExpr(Expr *E) {
+ return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
+ }
+
+ bool VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+
+ bool VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+ bool VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+ // Per the gcc docs, "this built-in function ignores top level
+ // qualifiers". Use the canonical type so that the CVR qualifiers can be
+ // stripped from it properly.
+ QualType T0 = Info.Ctx.getCanonicalType(E->getArgType1());
+ QualType T1 = Info.Ctx.getCanonicalType(E->getArgType2());
+ return Success(Info.Ctx.typesAreCompatible(T0.getUnqualifiedType(),
+ T1.getUnqualifiedType()),
+ E);
+ }
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitConditionalOperator(const ConditionalOperator *E);
+
+ bool VisitCastExpr(CastExpr* E);
+ bool VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitGNUNullExpr(const GNUNullExpr *E) {
+ return Success(0, E);
+ }
+
+ bool VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ return Success(0, E);
+ }
+
+ bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return Success(0, E);
+ }
+
+ bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return Success(E->EvaluateTrait(), E);
+ }
+
+ bool VisitChooseExpr(const ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(Info.Ctx));
+ }
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+private:
+ unsigned GetAlignOfExpr(const Expr *E);
+ unsigned GetAlignOfType(QualType T);
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) {
+ if (!E->getType()->isIntegralType())
+ return false;
+
+ return IntExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
+}
+
+static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
+ APValue Val;
+ if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt())
+ return false;
+ Result = Val.getInt();
+ return true;
+}
+
+bool IntExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
+ // Enums are integer constant exprs.
+ if (const EnumConstantDecl *D = dyn_cast<EnumConstantDecl>(E->getDecl())) {
+ // FIXME: This is an ugly hack around the fact that enums don't set their
+ // signedness consistently; see PR3173.
+ APSInt SI = D->getInitVal();
+ SI.setIsUnsigned(!E->getType()->isSignedIntegerType());
+ // FIXME: This is an ugly hack around the fact that enums don't
+ // set their width (!?!) consistently; see PR3173.
+ SI.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+ return Success(SI, E);
+ }
+
+ // In C++, const, non-volatile integers initialized with ICEs are ICEs.
+ // In C, they can also be folded, although they are not ICEs.
+ if (E->getType().getCVRQualifiers() == QualType::Const) {
+ if (const VarDecl *D = dyn_cast<VarDecl>(E->getDecl())) {
+ if (APValue *V = D->getEvaluatedValue())
+ return Success(V->getInt(), E);
+ if (const Expr *Init = D->getInit()) {
+ if (Visit(const_cast<Expr*>(Init))) {
+ // Cache the evaluated value in the variable declaration.
+ D->setEvaluatedValue(Info.Ctx, Result);
+ return true;
+ }
+
+ return false;
+ }
+ }
+ }
+
+ // Otherwise, random variable references are not constants.
+ return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
+static int EvaluateBuiltinClassifyType(const CallExpr *E) {
+ // The following enum mimics the values returned by GCC.
+ // FIXME: Does GCC differ between lvalue and rvalue references here?
+ enum gcc_type_class {
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class,
+ lang_type_class
+ };
+
+ // If no argument was supplied, default to "no_type_class". This isn't
+ // ideal, however it is what gcc does.
+ if (E->getNumArgs() == 0)
+ return no_type_class;
+
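+ // For example, a 'double' argument reaches the isRealType() case below and
+ // classifies as real_type_class.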
+ QualType ArgTy = E->getArg(0)->getType();
+ if (ArgTy->isVoidType())
+ return void_type_class;
+ else if (ArgTy->isEnumeralType())
+ return enumeral_type_class;
+ else if (ArgTy->isBooleanType())
+ return boolean_type_class;
+ else if (ArgTy->isCharType())
+ return string_type_class; // gcc doesn't appear to use char_type_class
+ else if (ArgTy->isIntegerType())
+ return integer_type_class;
+ else if (ArgTy->isPointerType())
+ return pointer_type_class;
+ else if (ArgTy->isReferenceType())
+ return reference_type_class;
+ else if (ArgTy->isRealType())
+ return real_type_class;
+ else if (ArgTy->isComplexType())
+ return complex_type_class;
+ else if (ArgTy->isFunctionType())
+ return function_type_class;
+ else if (ArgTy->isStructureType())
+ return record_type_class;
+ else if (ArgTy->isUnionType())
+ return union_type_class;
+ else if (ArgTy->isArrayType())
+ return array_type_class;
+ else // FIXME: offset_type_class, method_type_class, & lang_type_class?
+ assert(0 && "EvaluateBuiltinClassifyType(): unimplemented type");
+ return -1;
+}
+
+bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->isBuiltinCall(Info.Ctx)) {
+ default:
+ return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
+ case Builtin::BI__builtin_classify_type:
+ return Success(EvaluateBuiltinClassifyType(E), E);
+
+ case Builtin::BI__builtin_constant_p:
+ // __builtin_constant_p always has one operand: it returns true if that
+ // operand can be folded, false otherwise.
+ return Success(E->getArg(0)->isEvaluatable(Info.Ctx), E);
+ }
+}
+
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BinaryOperator::Comma) {
+ if (!Visit(E->getRHS()))
+ return false;
+
+ // If we can't evaluate the LHS, it might have side effects;
+ // conservatively mark it.
+ if (!E->getLHS()->isEvaluatable(Info.Ctx))
+ Info.EvalResult.HasSideEffects = true;
+
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ // These need to be handled specially because the operands aren't
+ // necessarily integral
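+ // For example, (1.5 && 2) folds to true: each operand is converted to bool
+ // via HandleConversionToBool before the logical operator is applied.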
+ bool lhsResult, rhsResult;
+
+ if (HandleConversionToBool(E->getLHS(), lhsResult, Info)) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (lhsResult == (E->getOpcode() == BinaryOperator::LOr))
+ return Success(lhsResult, E);
+
+ if (HandleConversionToBool(E->getRHS(), rhsResult, Info)) {
+ if (E->getOpcode() == BinaryOperator::LOr)
+ return Success(lhsResult || rhsResult, E);
+ else
+ return Success(lhsResult && rhsResult, E);
+ }
+ } else {
+ if (HandleConversionToBool(E->getRHS(), rhsResult, Info)) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (rhsResult == (E->getOpcode() == BinaryOperator::LOr) ||
+ !rhsResult == (E->getOpcode() == BinaryOperator::LAnd)) {
+ // Since we weren't able to evaluate the left hand side, it
+ // must have had side effects.
+ Info.EvalResult.HasSideEffects = true;
+
+ return Success(rhsResult, E);
+ }
+ }
+ }
+
+ return false;
+ }
+
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+
+ if (LHSTy->isAnyComplexType()) {
+ assert(RHSTy->isAnyComplexType() && "Invalid comparison");
+ APValue LHS, RHS;
+
+ if (!EvaluateComplex(E->getLHS(), LHS, Info))
+ return false;
+
+ if (!EvaluateComplex(E->getRHS(), RHS, Info))
+ return false;
+
+ if (LHS.isComplexFloat()) {
+ APFloat::cmpResult CR_r =
+ LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
+ APFloat::cmpResult CR_i =
+ LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
+
+ if (E->getOpcode() == BinaryOperator::EQ)
+ return Success((CR_r == APFloat::cmpEqual &&
+ CR_i == APFloat::cmpEqual), E);
+ else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Invalid complex comparison.");
+ return Success(((CR_r == APFloat::cmpGreaterThan ||
+ CR_r == APFloat::cmpLessThan) ||
+ (CR_i == APFloat::cmpGreaterThan ||
+ CR_i == APFloat::cmpLessThan)), E);
+ }
+ } else {
+ if (E->getOpcode() == BinaryOperator::EQ)
+ return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
+ LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
+ else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Invalid complex comparison.");
+ return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
+ LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
+ }
+ }
+ }
+
+ if (LHSTy->isRealFloatingType() &&
+ RHSTy->isRealFloatingType()) {
+ APFloat RHS(0.0), LHS(0.0);
+
+ if (!EvaluateFloat(E->getRHS(), RHS, Info))
+ return false;
+
+ if (!EvaluateFloat(E->getLHS(), LHS, Info))
+ return false;
+
+ APFloat::cmpResult CR = LHS.compare(RHS);
+
+ switch (E->getOpcode()) {
+ default:
+ assert(0 && "Invalid binary operator!");
+ case BinaryOperator::LT:
+ return Success(CR == APFloat::cmpLessThan, E);
+ case BinaryOperator::GT:
+ return Success(CR == APFloat::cmpGreaterThan, E);
+ case BinaryOperator::LE:
+ return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
+ case BinaryOperator::GE:
+ return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
+ E);
+ case BinaryOperator::EQ:
+ return Success(CR == APFloat::cmpEqual, E);
+ case BinaryOperator::NE:
+ return Success(CR == APFloat::cmpGreaterThan
+ || CR == APFloat::cmpLessThan, E);
+ }
+ }
+
+ if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
+ if (E->getOpcode() == BinaryOperator::Sub || E->isEqualityOp()) {
+ APValue LHSValue;
+ if (!EvaluatePointer(E->getLHS(), LHSValue, Info))
+ return false;
+
+ APValue RHSValue;
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info))
+ return false;
+
+ // Reject any bases; this is conservative, but good enough for
+ // common uses
+ if (LHSValue.getLValueBase() || RHSValue.getLValueBase())
+ return false;
+
+ if (E->getOpcode() == BinaryOperator::Sub) {
+ const QualType Type = E->getLHS()->getType();
+ const QualType ElementType = Type->getAsPointerType()->getPointeeType();
+
+ uint64_t D = LHSValue.getLValueOffset() - RHSValue.getLValueOffset();
+ D /= Info.Ctx.getTypeSize(ElementType) / 8;
+
+ return Success(D, E);
+ }
+ bool Result;
+ if (E->getOpcode() == BinaryOperator::EQ) {
+ Result = LHSValue.getLValueOffset() == RHSValue.getLValueOffset();
+ } else {
+ Result = LHSValue.getLValueOffset() != RHSValue.getLValueOffset();
+ }
+ return Success(Result, E);
+ }
+ }
+ if (!LHSTy->isIntegralType() ||
+ !RHSTy->isIntegralType()) {
+ // We can't continue from here for non-integral types, and they
+ // could potentially confuse the following operations.
+ return false;
+ }
+
+ // The LHS of a constant expr is always evaluated and needed.
+ if (!Visit(E->getLHS()))
+ return false; // error in subexpression.
+
+ APValue RHSVal;
+ if (!EvaluateIntegerOrLValue(E->getRHS(), RHSVal, Info))
+ return false;
+
+ // Handle cases like (unsigned long)&a + 4.
+ if (E->isAdditiveOp() && Result.isLValue() && RHSVal.isInt()) {
+ uint64_t offset = Result.getLValueOffset();
+ if (E->getOpcode() == BinaryOperator::Add)
+ offset += RHSVal.getInt().getZExtValue();
+ else
+ offset -= RHSVal.getInt().getZExtValue();
+ Result = APValue(Result.getLValueBase(), offset);
+ return true;
+ }
+
+ // Handle cases like 4 + (unsigned long)&a
+ if (E->getOpcode() == BinaryOperator::Add &&
+ RHSVal.isLValue() && Result.isInt()) {
+ uint64_t offset = RHSVal.getLValueOffset();
+ offset += Result.getInt().getZExtValue();
+ Result = APValue(RHSVal.getLValueBase(), offset);
+ return true;
+ }
+
+ // All the following cases expect both operands to be an integer
+ if (!Result.isInt() || !RHSVal.isInt())
+ return false;
+
+ APSInt& RHS = RHSVal.getInt();
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E->getOperatorLoc(), diag::note_invalid_subexpr_in_ice, E);
+ case BinaryOperator::Mul: return Success(Result.getInt() * RHS, E);
+ case BinaryOperator::Add: return Success(Result.getInt() + RHS, E);
+ case BinaryOperator::Sub: return Success(Result.getInt() - RHS, E);
+ case BinaryOperator::And: return Success(Result.getInt() & RHS, E);
+ case BinaryOperator::Xor: return Success(Result.getInt() ^ RHS, E);
+ case BinaryOperator::Or: return Success(Result.getInt() | RHS, E);
+ case BinaryOperator::Div:
+ if (RHS == 0)
+ return Error(E->getOperatorLoc(), diag::note_expr_divide_by_zero, E);
+ return Success(Result.getInt() / RHS, E);
+ case BinaryOperator::Rem:
+ if (RHS == 0)
+ return Error(E->getOperatorLoc(), diag::note_expr_divide_by_zero, E);
+ return Success(Result.getInt() % RHS, E);
+ case BinaryOperator::Shl: {
+ // FIXME: Warn about out of range shift amounts!
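+ // getLimitedValue() clamps the shift amount to BitWidth - 1, so an
+ // oversized shift folds as if it were a maximal in-range shift.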
+ unsigned SA =
+ (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
+ return Success(Result.getInt() << SA, E);
+ }
+ case BinaryOperator::Shr: {
+ unsigned SA =
+ (unsigned) RHS.getLimitedValue(Result.getInt().getBitWidth()-1);
+ return Success(Result.getInt() >> SA, E);
+ }
+
+ case BinaryOperator::LT: return Success(Result.getInt() < RHS, E);
+ case BinaryOperator::GT: return Success(Result.getInt() > RHS, E);
+ case BinaryOperator::LE: return Success(Result.getInt() <= RHS, E);
+ case BinaryOperator::GE: return Success(Result.getInt() >= RHS, E);
+ case BinaryOperator::EQ: return Success(Result.getInt() == RHS, E);
+ case BinaryOperator::NE: return Success(Result.getInt() != RHS, E);
+ }
+}
+
+bool IntExprEvaluator::VisitConditionalOperator(const ConditionalOperator *E) {
+ bool Cond;
+ if (!HandleConversionToBool(E->getCond(), Cond, Info))
+ return false;
+
+ return Visit(Cond ? E->getTrueExpr() : E->getFalseExpr());
+}
+
+unsigned IntExprEvaluator::GetAlignOfType(QualType T) {
+ // Get information about the alignment.
+ unsigned CharSize = Info.Ctx.Target.getCharWidth();
+
+ // __alignof is defined to return the preferred alignment.
+ return Info.Ctx.getPreferredTypeAlign(T.getTypePtr()) / CharSize;
+}
+
+unsigned IntExprEvaluator::GetAlignOfExpr(const Expr *E) {
+ E = E->IgnoreParens();
+
+ // alignof decl is always accepted, even if it doesn't make sense: we default
+ // to 1 in those cases.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return Info.Ctx.getDeclAlignInBytes(DRE->getDecl());
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return Info.Ctx.getDeclAlignInBytes(ME->getMemberDecl());
+
+ return GetAlignOfType(E->getType());
+}
+
+
+/// VisitSizeOfAlignOfExpr - Evaluate a sizeof or alignof expression, producing
+/// a result with the expression's type.
+bool IntExprEvaluator::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ QualType DstTy = E->getType();
+
+ // Handle alignof separately.
+ if (!E->isSizeOf()) {
+ if (E->isArgumentType())
+ return Success(GetAlignOfType(E->getArgumentType()), E);
+ else
+ return Success(GetAlignOfExpr(E->getArgumentExpr()), E);
+ }
+
+ QualType SrcTy = E->getTypeOfArgument();
+
+ // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
+ // extension.
+ if (SrcTy->isVoidType() || SrcTy->isFunctionType())
+ return Success(1, E);
+
+ // sizeof(vla) is not a constantexpr: C99 6.5.3.4p2.
+ if (!SrcTy->isConstantSizeType())
+ return false;
+
+ // Get information about the size.
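+ // For example, sizeof(int) on a target with a 32-bit int and an 8-bit char
+ // folds to 32 / 8 = 4.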
+ unsigned BitWidth = Info.Ctx.getTypeSize(SrcTy);
+ return Success(BitWidth / Info.Ctx.Target.getCharWidth(), E);
+}
+
+bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ // Special case unary operators that do not need their subexpression
+ // evaluated. offsetof/sizeof/alignof are all special.
+ if (E->isOffsetOfOp()) {
+ // The AST for offsetof is defined in such a way that we can just
+ // directly Evaluate it as an l-value.
+ APValue LV;
+ if (!EvaluateLValue(E->getSubExpr(), LV, Info))
+ return false;
+ if (LV.getLValueBase())
+ return false;
+ return Success(LV.getLValueOffset(), E);
+ }
+
+ if (E->getOpcode() == UnaryOperator::LNot) {
+ // LNot's operand isn't necessarily an integer, so we handle it specially.
+ bool bres;
+ if (!HandleConversionToBool(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+
+ // Only handle integral operations...
+ if (!E->getSubExpr()->getType()->isIntegralType())
+ return false;
+
+ // Get the operand value into 'Result'.
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ switch (E->getOpcode()) {
+ default:
+ // Address, indirect, pre/post inc/dec, etc are not valid constant exprs.
+ // See C99 6.6p3.
+ return Error(E->getOperatorLoc(), diag::note_invalid_subexpr_in_ice, E);
+ case UnaryOperator::Extension:
+ // FIXME: Should extension allow i-c-e extension expressions in its scope?
+ // If so, we could clear the diagnostic ID.
+ return true;
+ case UnaryOperator::Plus:
+ // The result is always just the subexpr.
+ return true;
+ case UnaryOperator::Minus:
+ if (!Result.isInt()) return false;
+ return Success(-Result.getInt(), E);
+ case UnaryOperator::Not:
+ if (!Result.isInt()) return false;
+ return Success(~Result.getInt(), E);
+ }
+}
+
+/// VisitCastExpr - Evaluate an implicit or explicit cast where the result
+/// type is an integer.
+bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
+ Expr *SubExpr = E->getSubExpr();
+ QualType DestType = E->getType();
+ QualType SrcType = SubExpr->getType();
+
+ if (DestType->isBooleanType()) {
+ bool BoolResult;
+ if (!HandleConversionToBool(SubExpr, BoolResult, Info))
+ return false;
+ return Success(BoolResult, E);
+ }
+
+ // Handle simple integer->integer casts.
+ if (SrcType->isIntegralType()) {
+ if (!Visit(SubExpr))
+ return false;
+
+ if (!Result.isInt()) {
+ // Only allow casts of lvalues if they are lossless.
+ return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
+ }
+
+ return Success(HandleIntToIntCast(DestType, SrcType,
+ Result.getInt(), Info.Ctx), E);
+ }
+
+ // FIXME: Clean this up!
+ if (SrcType->isPointerType()) {
+ APValue LV;
+ if (!EvaluatePointer(SubExpr, LV, Info))
+ return false;
+
+ if (LV.getLValueBase()) {
+ // Only allow based lvalue casts if they are lossless.
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
+ return false;
+
+ Result = LV;
+ return true;
+ }
+
+ APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset(), SrcType);
+ return Success(HandleIntToIntCast(DestType, SrcType, AsInt, Info.Ctx), E);
+ }
+
+ if (SrcType->isArrayType() || SrcType->isFunctionType()) {
+ // This handles double-conversion cases, where there's both
+ // an l-value promotion and an implicit conversion to int.
+ APValue LV;
+ if (!EvaluateLValue(SubExpr, LV, Info))
+ return false;
+
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(Info.Ctx.VoidPtrTy))
+ return false;
+
+ Result = LV;
+ return true;
+ }
+
+ if (SrcType->isAnyComplexType()) {
+ APValue C;
+ if (!EvaluateComplex(SubExpr, C, Info))
+ return false;
+ if (C.isComplexFloat())
+ return Success(HandleFloatToIntCast(DestType, SrcType,
+ C.getComplexFloatReal(), Info.Ctx),
+ E);
+ else
+ return Success(HandleIntToIntCast(DestType, SrcType,
+ C.getComplexIntReal(), Info.Ctx), E);
+ }
+ // FIXME: Handle vectors
+
+ if (!SrcType->isRealFloatingType())
+ return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+
+ APFloat F(0.0);
+ if (!EvaluateFloat(SubExpr, F, Info))
+ return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+
+ return Success(HandleFloatToIntCast(DestType, SrcType, F, Info.Ctx), E);
+}
+
+bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ APValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
+ return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+ return Success(LV.getComplexIntReal(), E);
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isComplexIntegerType()) {
+ APValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
+ return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
+ return Success(LV.getComplexIntImag(), E);
+ }
+
+ if (!E->getSubExpr()->isEvaluatable(Info.Ctx))
+ Info.EvalResult.HasSideEffects = true;
+ return Success(0, E);
+}
+
+//===----------------------------------------------------------------------===//
+// Float Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN FloatExprEvaluator
+ : public StmtVisitor<FloatExprEvaluator, bool> {
+ EvalInfo &Info;
+ APFloat &Result;
+public:
+ FloatExprEvaluator(EvalInfo &info, APFloat &result)
+ : Info(info), Result(result) {}
+
+ bool VisitStmt(Stmt *S) {
+ return false;
+ }
+
+ bool VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitCallExpr(const CallExpr *E);
+
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
+ bool VisitCastExpr(CastExpr *E);
+ bool VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+
+ bool VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ bool VisitUnaryExtension(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+
+ // FIXME: Missing: __real__/__imag__, array subscript of vector,
+ // member of vector, ImplicitValueInitExpr,
+ // conditional ?:, comma
+};
+} // end anonymous namespace
+
+static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ return FloatExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
+}
+
+bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->isBuiltinCall(Info.Ctx)) {
+ default: return false;
+ case Builtin::BI__builtin_huge_val:
+ case Builtin::BI__builtin_huge_valf:
+ case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_inf:
+ case Builtin::BI__builtin_inff:
+ case Builtin::BI__builtin_infl: {
+ const llvm::fltSemantics &Sem =
+ Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getInf(Sem);
+ return true;
+ }
+
+ case Builtin::BI__builtin_nan:
+ case Builtin::BI__builtin_nanf:
+ case Builtin::BI__builtin_nanl:
+ // If this is __builtin_nan() turn this into a nan, otherwise we
+ // can't constant fold it.
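+ // The string argument is parsed as an integer and used as the NaN payload.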
+ if (const StringLiteral *S =
+ dyn_cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())) {
+ if (!S->isWide()) {
+ const llvm::fltSemantics &Sem =
+ Info.Ctx.getFloatTypeSemantics(E->getType());
+ llvm::SmallString<16> s;
+ s.append(S->getStrData(), S->getStrData() + S->getByteLength());
+ s += '\0';
+ long l;
+ char *endp;
+ l = strtol(&s[0], &endp, 0);
+ if (endp != s.end()-1)
+ return false;
+ unsigned type = (unsigned int)l;
+ Result = llvm::APFloat::getNaN(Sem, false, type);
+ return true;
+ }
+ }
+ return false;
+
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ if (!EvaluateFloat(E->getArg(0), Result, Info))
+ return false;
+
+ if (Result.isNegative())
+ Result.changeSign();
+ return true;
+
+ case Builtin::BI__builtin_copysign:
+ case Builtin::BI__builtin_copysignf:
+ case Builtin::BI__builtin_copysignl: {
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ Result.copySign(RHS);
+ return true;
+ }
+ }
+}
+
+bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ if (E->getOpcode() == UnaryOperator::Deref)
+ return false;
+
+ if (!EvaluateFloat(E->getSubExpr(), Result, Info))
+ return false;
+
+ switch (E->getOpcode()) {
+ default: return false;
+ case UnaryOperator::Plus:
+ return true;
+ case UnaryOperator::Minus:
+ Result.changeSign();
+ return true;
+ }
+}
+
+bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ // FIXME: Diagnostics? I really don't understand how the warnings
+ // and errors are supposed to work.
+ APFloat RHS(0.0);
+ if (!EvaluateFloat(E->getLHS(), Result, Info))
+ return false;
+ if (!EvaluateFloat(E->getRHS(), RHS, Info))
+ return false;
+
+ switch (E->getOpcode()) {
+ default: return false;
+ case BinaryOperator::Mul:
+ Result.multiply(RHS, APFloat::rmNearestTiesToEven);
+ return true;
+ case BinaryOperator::Add:
+ Result.add(RHS, APFloat::rmNearestTiesToEven);
+ return true;
+ case BinaryOperator::Sub:
+ Result.subtract(RHS, APFloat::rmNearestTiesToEven);
+ return true;
+ case BinaryOperator::Div:
+ Result.divide(RHS, APFloat::rmNearestTiesToEven);
+ return true;
+ }
+}
+
+bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
+ Result = E->getValue();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
+ Expr* SubExpr = E->getSubExpr();
+
+ if (SubExpr->getType()->isIntegralType()) {
+ APSInt IntResult;
+ if (!EvaluateInteger(SubExpr, IntResult, Info))
+ return false;
+ Result = HandleIntToFloatCast(E->getType(), SubExpr->getType(),
+ IntResult, Info.Ctx);
+ return true;
+ }
+ if (SubExpr->getType()->isRealFloatingType()) {
+ if (!Visit(SubExpr))
+ return false;
+ Result = HandleFloatToFloatCast(E->getType(), SubExpr->getType(),
+ Result, Info.Ctx);
+ return true;
+ }
+ // FIXME: Handle complex types
+
+ return false;
+}
+
+bool FloatExprEvaluator::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Complex Evaluation (for float and integer)
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN ComplexExprEvaluator
+ : public StmtVisitor<ComplexExprEvaluator, APValue> {
+ EvalInfo &Info;
+
+public:
+ ComplexExprEvaluator(EvalInfo &info) : Info(info) {}
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ APValue VisitStmt(Stmt *S) {
+ return APValue();
+ }
+
+ APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+
+ APValue VisitImaginaryLiteral(ImaginaryLiteral *E) {
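+ // An imaginary literal such as 2.0i evaluates to a complex value whose
+ // real part is zero and whose imaginary part is the literal's value.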
+ Expr* SubExpr = E->getSubExpr();
+
+ if (SubExpr->getType()->isRealFloatingType()) {
+ APFloat Result(0.0);
+
+ if (!EvaluateFloat(SubExpr, Result, Info))
+ return APValue();
+
+ return APValue(APFloat(Result.getSemantics(), APFloat::fcZero, false),
+ Result);
+ } else {
+ assert(SubExpr->getType()->isIntegerType() &&
+ "Unexpected imaginary literal.");
+
+ llvm::APSInt Result;
+ if (!EvaluateInteger(SubExpr, Result, Info))
+ return APValue();
+
+ llvm::APSInt Zero(Result.getBitWidth(), !Result.isSigned());
+ Zero = 0;
+ return APValue(Zero, Result);
+ }
+ }
+
+ APValue VisitCastExpr(CastExpr *E) {
+ Expr* SubExpr = E->getSubExpr();
+ QualType EltType = E->getType()->getAsComplexType()->getElementType();
+ QualType SubType = SubExpr->getType();
+
+ if (SubType->isRealFloatingType()) {
+ APFloat Result(0.0);
+
+ if (!EvaluateFloat(SubExpr, Result, Info))
+ return APValue();
+
+ if (EltType->isRealFloatingType()) {
+ Result = HandleFloatToFloatCast(EltType, SubType, Result, Info.Ctx);
+ return APValue(Result,
+ APFloat(Result.getSemantics(), APFloat::fcZero, false));
+ } else {
+ llvm::APSInt IResult;
+ IResult = HandleFloatToIntCast(EltType, SubType, Result, Info.Ctx);
+ llvm::APSInt Zero(IResult.getBitWidth(), !IResult.isSigned());
+ Zero = 0;
+ return APValue(IResult, Zero);
+ }
+ } else if (SubType->isIntegerType()) {
+ APSInt Result;
+
+ if (!EvaluateInteger(SubExpr, Result, Info))
+ return APValue();
+
+ if (EltType->isRealFloatingType()) {
+ APFloat FResult =
+ HandleIntToFloatCast(EltType, SubType, Result, Info.Ctx);
+ return APValue(FResult,
+ APFloat(FResult.getSemantics(), APFloat::fcZero, false));
+ } else {
+ Result = HandleIntToIntCast(EltType, SubType, Result, Info.Ctx);
+ llvm::APSInt Zero(Result.getBitWidth(), !Result.isSigned());
+ Zero = 0;
+ return APValue(Result, Zero);
+ }
+ } else if (const ComplexType *CT = SubType->getAsComplexType()) {
+ APValue Src;
+
+ if (!EvaluateComplex(SubExpr, Src, Info))
+ return APValue();
+
+ QualType SrcType = CT->getElementType();
+
+ if (Src.isComplexFloat()) {
+ if (EltType->isRealFloatingType()) {
+ return APValue(HandleFloatToFloatCast(EltType, SrcType,
+ Src.getComplexFloatReal(),
+ Info.Ctx),
+ HandleFloatToFloatCast(EltType, SrcType,
+ Src.getComplexFloatImag(),
+ Info.Ctx));
+ } else {
+ return APValue(HandleFloatToIntCast(EltType, SrcType,
+ Src.getComplexFloatReal(),
+ Info.Ctx),
+ HandleFloatToIntCast(EltType, SrcType,
+ Src.getComplexFloatImag(),
+ Info.Ctx));
+ }
+ } else {
+ assert(Src.isComplexInt() && "Invalid evaluate result.");
+ if (EltType->isRealFloatingType()) {
+ return APValue(HandleIntToFloatCast(EltType, SrcType,
+ Src.getComplexIntReal(),
+ Info.Ctx),
+ HandleIntToFloatCast(EltType, SrcType,
+ Src.getComplexIntImag(),
+ Info.Ctx));
+ } else {
+ return APValue(HandleIntToIntCast(EltType, SrcType,
+ Src.getComplexIntReal(),
+ Info.Ctx),
+ HandleIntToIntCast(EltType, SrcType,
+ Src.getComplexIntImag(),
+ Info.Ctx));
+ }
+ }
+ }
+
+ // FIXME: Handle more casts.
+ return APValue();
+ }
+
+ APValue VisitBinaryOperator(const BinaryOperator *E);
+ APValue VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Info.Ctx)); }
+ APValue VisitUnaryExtension(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+  // FIXME: Missing: unary +/-/~, binary div, ImplicitValueInitExpr,
+  // conditional ?:, comma
+};
+} // end anonymous namespace
+
+static bool EvaluateComplex(const Expr *E, APValue &Result, EvalInfo &Info)
+{
+ Result = ComplexExprEvaluator(Info).Visit(const_cast<Expr*>(E));
+ assert((!Result.isComplexFloat() ||
+ (&Result.getComplexFloatReal().getSemantics() ==
+ &Result.getComplexFloatImag().getSemantics())) &&
+ "Invalid complex evaluation.");
+ return Result.isComplexFloat() || Result.isComplexInt();
+}
+
+APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E)
+{
+ APValue Result, RHS;
+
+ if (!EvaluateComplex(E->getLHS(), Result, Info))
+ return APValue();
+
+ if (!EvaluateComplex(E->getRHS(), RHS, Info))
+ return APValue();
+
+ assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
+ "Invalid operands to binary operator.");
+ switch (E->getOpcode()) {
+ default: return APValue();
+ case BinaryOperator::Add:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() += RHS.getComplexIntReal();
+ Result.getComplexIntImag() += RHS.getComplexIntImag();
+ }
+ break;
+ case BinaryOperator::Sub:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() -= RHS.getComplexIntReal();
+ Result.getComplexIntImag() -= RHS.getComplexIntImag();
+ }
+ break;
+ case BinaryOperator::Mul:
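+    // Complex multiplication: (a + bi)(c + di) = (ac - bd) + (ad + bc)i.
+    // Both paths below copy the LHS first, because Result is updated in
+    // place while its original real and imaginary parts are still needed.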
+ if (Result.isComplexFloat()) {
+ APValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+
+ APFloat Tmp = LHS_r;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal().subtract(Tmp, APFloat::rmNearestTiesToEven);
+
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven);
+ } else {
+ APValue LHS = Result;
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
+ LHS.getComplexIntImag() * RHS.getComplexIntImag());
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
+ LHS.getComplexIntImag() * RHS.getComplexIntReal());
+ }
+ break;
+ }
+
+ return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Top level Expr::Evaluate method.
+//===----------------------------------------------------------------------===//
+
+/// Evaluate - Return true if this is a constant which we can fold using
+/// any crazy technique (that has nothing to do with language standards) that
+/// we want to. If this function returns true, it returns the folded constant
+/// in Result.
+bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result);
+
+ if (getType()->isVectorType()) {
+ if (!EvaluateVector(this, Result.Val, Info))
+ return false;
+ } else if (getType()->isIntegerType()) {
+ if (!IntExprEvaluator(Info, Result.Val).Visit(const_cast<Expr*>(this)))
+ return false;
+ } else if (getType()->hasPointerRepresentation()) {
+ if (!EvaluatePointer(this, Result.Val, Info))
+ return false;
+ } else if (getType()->isRealFloatingType()) {
+ llvm::APFloat f(0.0);
+ if (!EvaluateFloat(this, f, Info))
+ return false;
+
+ Result.Val = APValue(f);
+ } else if (getType()->isAnyComplexType()) {
+ if (!EvaluateComplex(this, Result.Val, Info))
+ return false;
+ } else
+ return false;
+
+ return true;
+}
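+
+// Illustrative usage sketch ('UseConstant' is a hypothetical caller-side
+// helper):
+//
+//   Expr::EvalResult ER;
+//   if (E->Evaluate(ER, Ctx) && !ER.HasSideEffects && ER.Val.isInt())
+//     UseConstant(ER.Val.getInt());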
+
+bool Expr::EvaluateAsLValue(EvalResult &Result, ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result);
+
+ return EvaluateLValue(this, Result.Val, Info) && !Result.HasSideEffects;
+}
+
+/// isEvaluatable - Call Evaluate to see if this expression can be constant
+/// folded, but discard the result.
+bool Expr::isEvaluatable(ASTContext &Ctx) const {
+ EvalResult Result;
+ return Evaluate(Result, Ctx) && !Result.HasSideEffects;
+}
+
+APSInt Expr::EvaluateAsInt(ASTContext &Ctx) const {
+ EvalResult EvalResult;
+ bool Result = Evaluate(EvalResult, Ctx);
+  Result = Result;  // Self-assignment silences an unused-variable warning
+                    // when asserts are compiled out.
+ assert(Result && "Could not evaluate expression");
+ assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EvalResult.Val.getInt();
+}
diff --git a/lib/AST/InheritViz.cpp b/lib/AST/InheritViz.cpp
new file mode 100644
index 0000000..dd2fc14
--- /dev/null
+++ b/lib/AST/InheritViz.cpp
@@ -0,0 +1,168 @@
+//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CXXRecordDecl::viewInheritance, which
+// generates a GraphViz DOT file that depicts the class inheritance
+// diagram and then invokes Graphviz (dot) and a viewer (gv) on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+using namespace llvm;
+
+namespace clang {
+
+/// InheritanceHierarchyWriter - Helper class that writes out a
+/// GraphViz file that diagrams the inheritance hierarchy starting at
+/// a given C++ class type. Note that we do not use LLVM's
+/// GraphWriter, because the interface does not permit us to properly
+/// differentiate between uses of types as virtual bases
+/// vs. non-virtual bases.
+class InheritanceHierarchyWriter {
+ ASTContext& Context;
+ llvm::raw_ostream &Out;
+ std::map<QualType, int, QualTypeOrdering> DirectBaseCount;
+ std::set<QualType, QualTypeOrdering> KnownVirtualBases;
+
+public:
+ InheritanceHierarchyWriter(ASTContext& Context, llvm::raw_ostream& Out)
+ : Context(Context), Out(Out) { }
+
+ void WriteGraph(QualType Type) {
+ Out << "digraph \"" << DOT::EscapeString(Type.getAsString()) << "\" {\n";
+ WriteNode(Type, false);
+ Out << "}\n";
+ }
+
+protected:
+ /// WriteNode - Write out the description of node in the inheritance
+ /// diagram, which may be a base class or it may be the root node.
+ void WriteNode(QualType Type, bool FromVirtual);
+
+ /// WriteNodeReference - Write out a reference to the given node,
+ /// using a unique identifier for each direct base and for the
+ /// (only) virtual base.
+ llvm::raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual);
+};
+
+void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ if (FromVirtual) {
+ if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end())
+ return;
+
+ // We haven't seen this virtual base before, so display it and
+ // its bases.
+ KnownVirtualBases.insert(CanonType);
+ }
+
+ // Declare the node itself.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+
+ // Give the node a label based on the name of the class.
+ std::string TypeName = Type.getAsString();
+ Out << " [ shape=\"box\", label=\"" << DOT::EscapeString(TypeName);
+
+ // If the name of the class was a typedef or something different
+ // from the "real" class name, show the real class name in
+ // parentheses so we don't confuse ourselves.
+ if (TypeName != CanonType.getAsString()) {
+ Out << "\\n(" << CanonType.getAsString() << ")";
+ }
+
+ // Finished describing the node.
+ Out << " \"];\n";
+
+ // Display the base classes.
+ const CXXRecordDecl *Decl
+ = static_cast<const CXXRecordDecl *>(Type->getAsRecordType()->getDecl());
+ for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin();
+ Base != Decl->bases_end(); ++Base) {
+ QualType CanonBaseType = Context.getCanonicalType(Base->getType());
+
+ // If this is not virtual inheritance, bump the direct base
+ // count for the type.
+ if (!Base->isVirtual())
+ ++DirectBaseCount[CanonBaseType];
+
+ // Write out the node (if we need to).
+ WriteNode(Base->getType(), Base->isVirtual());
+
+ // Write out the edge.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+ Out << " -> ";
+ WriteNodeReference(Base->getType(), Base->isVirtual());
+
+ // Write out edge attributes to show the kind of inheritance.
+ if (Base->isVirtual()) {
+ Out << " [ style=\"dashed\" ]";
+ }
+ Out << ";";
+ }
+}
+
+/// WriteNodeReference - Write out a reference to the given node,
+/// using a unique identifier for each direct base and for the
+/// (only) virtual base.
+llvm::raw_ostream&
+InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
+ bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ Out << "Class_" << CanonType.getAsOpaquePtr();
+ if (!FromVirtual)
+ Out << "_" << DirectBaseCount[CanonType];
+ return Out;
+}
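+
+// Illustrative sketch of the DOT text this produces (node names embed the
+// canonical type's pointer value, so the exact identifiers vary):
+//
+//   digraph "Derived" {
+//     Class_0x123_0 [ shape="box", label="Derived "];
+//     Class_0x456_0 [ shape="box", label="Base "];
+//     Class_0x123_0 -> Class_0x456_0;
+//   }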
+
+/// viewInheritance - Display the inheritance hierarchy of this C++
+/// class using GraphViz.
+void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
+ QualType Self = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ std::string ErrMsg;
+ sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg);
+ if (Filename.isEmpty()) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+ Filename.appendComponent(Self.getAsString() + ".dot");
+ if (Filename.makeUnique(true,&ErrMsg)) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+
+ llvm::errs() << "Writing '" << Filename.c_str() << "'... ";
+
+ llvm::raw_fd_ostream O(Filename.c_str(), false, ErrMsg);
+
+ if (ErrMsg.empty()) {
+ InheritanceHierarchyWriter Writer(Context, O);
+ Writer.WriteGraph(Self);
+ llvm::errs() << " done. \n";
+
+ O.close();
+
+ // Display the graph
+ DisplayGraph(Filename);
+ } else {
+ llvm::errs() << "error opening file for writing!\n";
+ }
+}
+
+}
diff --git a/lib/AST/Makefile b/lib/AST/Makefile
new file mode 100644
index 0000000..f7d4e9f
--- /dev/null
+++ b/lib/AST/Makefile
@@ -0,0 +1,22 @@
+##===- clang/lib/AST/Makefile ------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST library for the C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangAST
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
new file mode 100644
index 0000000..09522a2
--- /dev/null
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -0,0 +1,160 @@
+//===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace clang;
+
+NestedNameSpecifier *
+NestedNameSpecifier::FindOrInsert(ASTContext &Context,
+ const NestedNameSpecifier &Mockup) {
+ llvm::FoldingSetNodeID ID;
+ Mockup.Profile(ID);
+
+ void *InsertPos = 0;
+ NestedNameSpecifier *NNS
+ = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
+ if (!NNS) {
+ NNS = new (Context, 4) NestedNameSpecifier(Mockup);
+ Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ }
+
+ return NNS;
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
+ IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ assert(Prefix && Prefix->isDependent() && "Prefix must be dependent");
+
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Identifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
+ NamespaceDecl *NS) {
+ assert(NS && "Namespace cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Namespace);
+ Mockup.Specifier = NS;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(ASTContext &Context, NestedNameSpecifier *Prefix,
+ bool Template, Type *T) {
+ assert(T && "Type cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Template? TypeSpecWithTemplate : TypeSpec);
+ Mockup.Specifier = T;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *NestedNameSpecifier::GlobalSpecifier(ASTContext &Context) {
+ if (!Context.GlobalNestedNameSpecifier)
+ Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier();
+ return Context.GlobalNestedNameSpecifier;
+}
+
+/// \brief Whether this nested name specifier refers to a dependent
+/// type or not.
+bool NestedNameSpecifier::isDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isDependentType();
+ }
+
+ // Necessary to suppress a GCC warning.
+ return false;
+}
+
+/// \brief Print this nested name specifier to the given output
+/// stream.
+void
+NestedNameSpecifier::print(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (getPrefix())
+ getPrefix()->print(OS, Policy);
+
+ switch (getKind()) {
+ case Identifier:
+ OS << getAsIdentifier()->getName();
+ break;
+
+ case Namespace:
+ OS << getAsNamespace()->getIdentifier()->getName();
+ break;
+
+ case Global:
+ break;
+
+ case TypeSpecWithTemplate:
+ OS << "template ";
+ // Fall through to print the type.
+
+ case TypeSpec: {
+ std::string TypeStr;
+ Type *T = getAsType();
+
+ // If this is a qualified name type, suppress the qualification:
+ // it's part of our nested-name-specifier sequence anyway. FIXME:
+ // We should be able to assert that this doesn't happen.
+ if (const QualifiedNameType *QualT = dyn_cast<QualifiedNameType>(T))
+ T = QualT->getNamedType().getTypePtr();
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTagKind = true;
+ T->getAsStringInternal(TypeStr, InnerPolicy);
+ OS << TypeStr;
+ break;
+ }
+ }
+
+ OS << "::";
+}
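+
+// Illustrative examples of what print() emits (the prefix, if present, is
+// printed first, and every specifier ends with "::"):
+//   Global:               "::"
+//   Namespace:            "std::"
+//   TypeSpec:             "std::vector<int>::"
+//   TypeSpecWithTemplate: "T::template apply<int>::"
+//   Identifier:           "T::type::"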
+
+void NestedNameSpecifier::Destroy(ASTContext &Context) {
+ this->~NestedNameSpecifier();
+ Context.Deallocate((void *)this);
+}
+
+void NestedNameSpecifier::dump() {
+ PrintingPolicy Policy;
+ Policy.CPlusPlus = true;
+ print(llvm::errs(), Policy);
+}
diff --git a/lib/AST/ParentMap.cpp b/lib/AST/ParentMap.cpp
new file mode 100644
index 0000000..9d87daa
--- /dev/null
+++ b/lib/AST/ParentMap.cpp
@@ -0,0 +1,94 @@
+//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParentMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
+
+static void BuildParentMap(MapTy& M, Stmt* S) {
+ for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
+ if (*I) {
+ M[*I] = S;
+ BuildParentMap(M, *I);
+ }
+}
+
+ParentMap::ParentMap(Stmt* S) : Impl(0) {
+ if (S) {
+ MapTy *M = new MapTy();
+ BuildParentMap(*M, S);
+ Impl = M;
+ }
+}
+
+ParentMap::~ParentMap() {
+ delete (MapTy*) Impl;
+}
+
+Stmt* ParentMap::getParent(Stmt* S) const {
+ MapTy* M = (MapTy*) Impl;
+ MapTy::iterator I = M->find(S);
+ return I == M->end() ? 0 : I->second;
+}
+
+Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
+ do { S = getParent(S); } while (S && isa<ParenExpr>(S));
+ return S;
+}
+
+bool ParentMap::isConsumedExpr(Expr* E) const {
+ Stmt *P = getParent(E);
+ Stmt *DirectChild = E;
+
+ // Ignore parents that are parentheses or casts.
+ while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P))) {
+ DirectChild = P;
+ P = getParent(P);
+ }
+
+ if (!P)
+ return false;
+
+ switch (P->getStmtClass()) {
+ default:
+ return isa<Expr>(P);
+ case Stmt::DeclStmtClass:
+ return true;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator *BE = cast<BinaryOperator>(P);
+ // If it is a comma, only the right side is consumed.
+ // If it isn't a comma, both sides are consumed.
+ return BE->getOpcode()!=BinaryOperator::Comma ||DirectChild==BE->getRHS();
+ }
+ case Stmt::ForStmtClass:
+ return DirectChild == cast<ForStmt>(P)->getCond();
+ case Stmt::WhileStmtClass:
+ return DirectChild == cast<WhileStmt>(P)->getCond();
+ case Stmt::DoStmtClass:
+ return DirectChild == cast<DoStmt>(P)->getCond();
+ case Stmt::IfStmtClass:
+ return DirectChild == cast<IfStmt>(P)->getCond();
+ case Stmt::IndirectGotoStmtClass:
+ return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
+ case Stmt::SwitchStmtClass:
+ return DirectChild == cast<SwitchStmt>(P)->getCond();
+ case Stmt::ReturnStmtClass:
+ return true;
+ }
+}
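+
+// Illustrative examples (hypothetical statements) of how the logic above
+// classifies an expression E:
+//   x + y;            // E = 'x + y': not consumed (statement context)
+//   int a = x + y;    // E = 'x + y': consumed (initializer of a DeclStmt)
+//   if (x + y) {}     // E = 'x + y': consumed (condition of the 'if')
+//   x, y;             // E = 'x': not consumed; E = 'y': consumed (comma)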
+
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
new file mode 100644
index 0000000..1757791
--- /dev/null
+++ b/lib/AST/Stmt.cpp
@@ -0,0 +1,587 @@
+//===--- Stmt.cpp - Statement AST Node Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt class and statement subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+using namespace clang;
+
+static struct StmtClassNameTable {
+ const char *Name;
+ unsigned Counter;
+ unsigned Size;
+} StmtClassInfo[Stmt::lastExprConstant+1];
+
+static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
+ static bool Initialized = false;
+ if (Initialized)
+ return StmtClassInfo[E];
+
+  // Initialize the table on the first use.
+ Initialized = true;
+#define STMT(CLASS, PARENT) \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
+#include "clang/AST/StmtNodes.def"
+
+ return StmtClassInfo[E];
+}
+
+const char *Stmt::getStmtClassName() const {
+ return getStmtInfoTableEntry(sClass).Name;
+}
+
+void Stmt::DestroyChildren(ASTContext &C) {
+ for (child_iterator I = child_begin(), E = child_end(); I !=E; )
+ if (Stmt* Child = *I++) Child->Destroy(C);
+}
+
+void Stmt::Destroy(ASTContext &C) {
+ DestroyChildren(C);
+ // FIXME: Eventually all Stmts should be allocated with the allocator
+ // in ASTContext, just like with Decls.
+ this->~Stmt();
+ C.Deallocate((void *)this);
+}
+
+void DeclStmt::Destroy(ASTContext &C) {
+ this->~DeclStmt();
+ C.Deallocate((void *)this);
+}
+
+void Stmt::PrintStats() {
+ // Ensure the table is primed.
+ getStmtInfoTableEntry(Stmt::NullStmtClass);
+
+ unsigned sum = 0;
+ fprintf(stderr, "*** Stmt/Expr Stats:\n");
+ for (int i = 0; i != Stmt::lastExprConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ sum += StmtClassInfo[i].Counter;
+ }
+ fprintf(stderr, " %d stmts/exprs total.\n", sum);
+ sum = 0;
+ for (int i = 0; i != Stmt::lastExprConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ if (StmtClassInfo[i].Counter == 0) continue;
+ fprintf(stderr, " %d %s, %d each (%d bytes)\n",
+ StmtClassInfo[i].Counter, StmtClassInfo[i].Name,
+ StmtClassInfo[i].Size,
+ StmtClassInfo[i].Counter*StmtClassInfo[i].Size);
+ sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
+ }
+ fprintf(stderr, "Total bytes = %d\n", sum);
+}
+
+void Stmt::addStmtClass(StmtClass s) {
+ ++getStmtInfoTableEntry(s).Counter;
+}
+
+static bool StatSwitch = false;
+
+bool Stmt::CollectingStats(bool enable) {
+ if (enable) StatSwitch = true;
+ return StatSwitch;
+}
+
+NullStmt* NullStmt::Clone(ASTContext &C) const {
+ return new (C) NullStmt(SemiLoc);
+}
+
+ContinueStmt* ContinueStmt::Clone(ASTContext &C) const {
+ return new (C) ContinueStmt(ContinueLoc);
+}
+
+BreakStmt* BreakStmt::Clone(ASTContext &C) const {
+ return new (C) BreakStmt(BreakLoc);
+}
+
+void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
+ if (this->Body)
+ C.Deallocate(Body);
+ this->NumStmts = NumStmts;
+
+ Body = new (C) Stmt*[NumStmts];
+ memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
+}
+
+const char *LabelStmt::getName() const {
+ return getID()->getName();
+}
+
+// This is defined here to avoid having Stmt.h pull in Expr.h.
+SourceRange ReturnStmt::getSourceRange() const {
+ if (RetExpr)
+ return SourceRange(RetLoc, RetExpr->getLocEnd());
+ else
+ return SourceRange(RetLoc);
+}
+
+bool Stmt::hasImplicitControlFlow() const {
+ switch (sClass) {
+ default:
+ return false;
+
+ case CallExprClass:
+ case ConditionalOperatorClass:
+ case ChooseExprClass:
+ case StmtExprClass:
+ case DeclStmtClass:
+ return true;
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator* B = cast<BinaryOperator>(this);
+ if (B->isLogicalOp() || B->getOpcode() == BinaryOperator::Comma)
+ return true;
+ else
+ return false;
+ }
+ }
+}
+
+Expr *AsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+/// getOutputConstraint - Return the constraint string for the specified
+/// output operand. All output constraints are known to be non-empty (either
+/// '=' or '+').
+std::string AsmStmt::getOutputConstraint(unsigned i) const {
+ return std::string(Constraints[i]->getStrData(),
+ Constraints[i]->getByteLength());
+}
+
+/// getNumPlusOperands - Return the number of output operands that have a "+"
+/// constraint.
+unsigned AsmStmt::getNumPlusOperands() const {
+ unsigned Res = 0;
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i)
+ if (isOutputPlusConstraint(i))
+ ++Res;
+ return Res;
+}
+
+
+
+Expr *AsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+
+/// getInputConstraint - Return the specified input constraint. Unlike output
+/// constraints, these can be empty.
+std::string AsmStmt::getInputConstraint(unsigned i) const {
+ return std::string(Constraints[i + NumOutputs]->getStrData(),
+ Constraints[i + NumOutputs]->getByteLength());
+}
+
+
+void AsmStmt::setOutputsAndInputs(unsigned NumOutputs,
+ unsigned NumInputs,
+ const std::string *Names,
+ StringLiteral **Constraints,
+ Stmt **Exprs) {
+ this->NumOutputs = NumOutputs;
+ this->NumInputs = NumInputs;
+ this->Names.clear();
+ this->Names.insert(this->Names.end(), Names, Names + NumOutputs + NumInputs);
+ this->Constraints.clear();
+ this->Constraints.insert(this->Constraints.end(),
+ Constraints, Constraints + NumOutputs + NumInputs);
+ this->Exprs.clear();
+ this->Exprs.insert(this->Exprs.end(), Exprs, Exprs + NumOutputs + NumInputs);
+}
+
+/// getNamedOperand - Given a symbolic operand reference like %[foo],
+/// translate this into a numeric value needed to reference the same operand.
+/// This returns -1 if the operand name is invalid.
+int AsmStmt::getNamedOperand(const std::string &SymbolicName) const {
+ unsigned NumPlusOperands = 0;
+
+ // Check if this is an output operand.
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ if (getOutputName(i) == SymbolicName)
+ return i;
+ }
+
+ for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ if (getInputName(i) == SymbolicName)
+ return getNumOutputs() + NumPlusOperands + i;
+
+ // Not found.
+ return -1;
+}
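+
+// Illustrative example: for the (hypothetical) statement
+//   asm("mov %[val], %[res]" : [res] "=r"(x) : [val] "r"(y));
+// getNamedOperand("res") returns 0 (the first output), and
+// getNamedOperand("val") returns 1, i.e. getNumOutputs() plus the input index.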
+
+void AsmStmt::setClobbers(StringLiteral **Clobbers, unsigned NumClobbers) {
+ this->Clobbers.clear();
+ this->Clobbers.insert(this->Clobbers.end(), Clobbers, Clobbers + NumClobbers);
+}
+
+/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
+/// it into Pieces.  Returns 0 on success; on error it returns a diagnostic ID
+/// and sets DiagOffs to the offset of the offending character in the string.
+unsigned AsmStmt::AnalyzeAsmString(llvm::SmallVectorImpl<AsmStringPiece>&Pieces,
+ ASTContext &C, unsigned &DiagOffs) const {
+ const char *StrStart = getAsmString()->getStrData();
+ const char *StrEnd = StrStart + getAsmString()->getByteLength();
+ const char *CurPtr = StrStart;
+
+  // "Simple" inline asms have no constraints or operands; just convert the
+  // asm string, escaping '$' characters.
+ if (isSimple()) {
+ std::string Result;
+ for (; CurPtr != StrEnd; ++CurPtr) {
+ switch (*CurPtr) {
+ case '$':
+ Result += "$$";
+ break;
+ default:
+ Result += *CurPtr;
+ break;
+ }
+ }
+ Pieces.push_back(AsmStringPiece(Result));
+ return 0;
+ }
+
+ // CurStringPiece - The current string that we are building up as we scan the
+ // asm string.
+ std::string CurStringPiece;
+
+ while (1) {
+ // Done with the string?
+ if (CurPtr == StrEnd) {
+ if (!CurStringPiece.empty())
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ return 0;
+ }
+
+ char CurChar = *CurPtr++;
+ if (CurChar == '$') {
+ CurStringPiece += "$$";
+ continue;
+ } else if (CurChar != '%') {
+ CurStringPiece += CurChar;
+ continue;
+ }
+
+    // We have a '%' escape; decode the escape sequence that follows.
+ if (CurPtr == StrEnd) {
+ // % at end of string is invalid (no escape).
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+
+ char EscapedChar = *CurPtr++;
+ if (EscapedChar == '%') { // %% -> %
+ // Escaped percentage sign.
+ CurStringPiece += '%';
+ continue;
+ }
+
+    if (EscapedChar == '=') { // %= -> Generate a unique ID.
+ CurStringPiece += "${:uid}";
+ continue;
+ }
+
+ // Otherwise, we have an operand. If we have accumulated a string so far,
+ // add it to the Pieces list.
+ if (!CurStringPiece.empty()) {
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ CurStringPiece.clear();
+ }
+
+ // Handle %x4 and %x[foo] by capturing x as the modifier character.
+ char Modifier = '\0';
+ if (isalpha(EscapedChar)) {
+ Modifier = EscapedChar;
+ EscapedChar = *CurPtr++;
+ }
+
+ if (isdigit(EscapedChar)) {
+ // %n - Assembler operand n
+ unsigned N = 0;
+
+ --CurPtr;
+ while (CurPtr != StrEnd && isdigit(*CurPtr))
+ N = N*10 + ((*CurPtr++)-'0');
+
+ unsigned NumOperands =
+ getNumOutputs() + getNumPlusOperands() + getNumInputs();
+ if (N >= NumOperands) {
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_operand_number;
+ }
+
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+ continue;
+ }
+
+ // Handle %[foo], a symbolic operand reference.
+ if (EscapedChar == '[') {
+ DiagOffs = CurPtr-StrStart-1;
+
+ // Find the ']'.
+ const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr);
+ if (NameEnd == 0)
+ return diag::err_asm_unterminated_symbolic_operand_name;
+ if (NameEnd == CurPtr)
+ return diag::err_asm_empty_symbolic_operand_name;
+
+ std::string SymbolicName(CurPtr, NameEnd);
+
+ int N = getNamedOperand(SymbolicName);
+ if (N == -1) {
+ // Verify that an operand with that name exists.
+ DiagOffs = CurPtr-StrStart;
+ return diag::err_asm_unknown_symbolic_operand_name;
+ }
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+
+ CurPtr = NameEnd+1;
+ continue;
+ }
+
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+}
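+
+// Illustrative example of the decomposition above: for the (hypothetical)
+// statement
+//   asm("add %1, %0" : "=r"(dst) : "r"(src));
+// the asm string is split into the pieces
+//   String("add "), Operand(1), String(", "), Operand(0)
+// where operand 0 is the output 'dst' and operand 1 is the input 'src'.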
+
+//===----------------------------------------------------------------------===//
+// Constructors
+//===----------------------------------------------------------------------===//
+
+AsmStmt::AsmStmt(SourceLocation asmloc, bool issimple, bool isvolatile,
+ unsigned numoutputs, unsigned numinputs,
+ std::string *names, StringLiteral **constraints,
+ Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc)
+ : Stmt(AsmStmtClass), AsmLoc(asmloc), RParenLoc(rparenloc), AsmStr(asmstr)
+ , IsSimple(issimple), IsVolatile(isvolatile)
+ , NumOutputs(numoutputs), NumInputs(numinputs) {
+ for (unsigned i = 0, e = numinputs + numoutputs; i != e; i++) {
+ Names.push_back(names[i]);
+ Exprs.push_back(exprs[i]);
+ Constraints.push_back(constraints[i]);
+ }
+
+ for (unsigned i = 0; i != numclobbers; i++)
+ Clobbers.push_back(clobbers[i]);
+}
+
+ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
+ Stmt *Body, SourceLocation FCL,
+ SourceLocation RPL)
+: Stmt(ObjCForCollectionStmtClass) {
+ SubExprs[ELEM] = Elem;
+ SubExprs[COLLECTION] = reinterpret_cast<Stmt*>(Collect);
+ SubExprs[BODY] = Body;
+ ForLoc = FCL;
+ RParenLoc = RPL;
+}
+
+
+ObjCAtCatchStmt::ObjCAtCatchStmt(SourceLocation atCatchLoc,
+ SourceLocation rparenloc,
+ ParmVarDecl *catchVarDecl, Stmt *atCatchStmt,
+ Stmt *atCatchList)
+: Stmt(ObjCAtCatchStmtClass) {
+ ExceptionDecl = catchVarDecl;
+ SubExprs[BODY] = atCatchStmt;
+ SubExprs[NEXT_CATCH] = NULL;
+ // FIXME: O(N^2) in number of catch blocks.
+ if (atCatchList) {
+ ObjCAtCatchStmt *AtCatchList = static_cast<ObjCAtCatchStmt*>(atCatchList);
+
+ while (ObjCAtCatchStmt* NextCatch = AtCatchList->getNextCatchStmt())
+ AtCatchList = NextCatch;
+
+ AtCatchList->SubExprs[NEXT_CATCH] = this;
+ }
+ AtCatchLoc = atCatchLoc;
+ RParenLoc = rparenloc;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// DeclStmt
+Stmt::child_iterator DeclStmt::child_begin() {
+ return StmtIterator(DG.begin(), DG.end());
+}
+
+Stmt::child_iterator DeclStmt::child_end() {
+ return StmtIterator(DG.end(), DG.end());
+}
+
+// NullStmt
+Stmt::child_iterator NullStmt::child_begin() { return child_iterator(); }
+Stmt::child_iterator NullStmt::child_end() { return child_iterator(); }
+
+// CompoundStmt
+Stmt::child_iterator CompoundStmt::child_begin() { return &Body[0]; }
+Stmt::child_iterator CompoundStmt::child_end() { return &Body[0]+NumStmts; }
+
+// CaseStmt
+Stmt::child_iterator CaseStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator CaseStmt::child_end() { return &SubExprs[END_EXPR]; }
+
+// DefaultStmt
+Stmt::child_iterator DefaultStmt::child_begin() { return &SubStmt; }
+Stmt::child_iterator DefaultStmt::child_end() { return &SubStmt+1; }
+
+// LabelStmt
+Stmt::child_iterator LabelStmt::child_begin() { return &SubStmt; }
+Stmt::child_iterator LabelStmt::child_end() { return &SubStmt+1; }
+
+// IfStmt
+Stmt::child_iterator IfStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator IfStmt::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// SwitchStmt
+Stmt::child_iterator SwitchStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator SwitchStmt::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// WhileStmt
+Stmt::child_iterator WhileStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator WhileStmt::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// DoStmt
+Stmt::child_iterator DoStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator DoStmt::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// ForStmt
+Stmt::child_iterator ForStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator ForStmt::child_end() { return &SubExprs[0]+END_EXPR; }
+
+// ObjCForCollectionStmt
+Stmt::child_iterator ObjCForCollectionStmt::child_begin() {
+ return &SubExprs[0];
+}
+Stmt::child_iterator ObjCForCollectionStmt::child_end() {
+ return &SubExprs[0]+END_EXPR;
+}
+
+// GotoStmt
+Stmt::child_iterator GotoStmt::child_begin() { return child_iterator(); }
+Stmt::child_iterator GotoStmt::child_end() { return child_iterator(); }
+
+// IndirectGotoStmt
+Expr* IndirectGotoStmt::getTarget() { return cast<Expr>(Target); }
+const Expr* IndirectGotoStmt::getTarget() const { return cast<Expr>(Target); }
+
+Stmt::child_iterator IndirectGotoStmt::child_begin() { return &Target; }
+Stmt::child_iterator IndirectGotoStmt::child_end() { return &Target+1; }
+
+// ContinueStmt
+Stmt::child_iterator ContinueStmt::child_begin() { return child_iterator(); }
+Stmt::child_iterator ContinueStmt::child_end() { return child_iterator(); }
+
+// BreakStmt
+Stmt::child_iterator BreakStmt::child_begin() { return child_iterator(); }
+Stmt::child_iterator BreakStmt::child_end() { return child_iterator(); }
+
+// ReturnStmt
+const Expr* ReturnStmt::getRetValue() const {
+ return cast_or_null<Expr>(RetExpr);
+}
+Expr* ReturnStmt::getRetValue() {
+ return cast_or_null<Expr>(RetExpr);
+}
+
+Stmt::child_iterator ReturnStmt::child_begin() {
+ return &RetExpr;
+}
+Stmt::child_iterator ReturnStmt::child_end() {
+ return RetExpr ? &RetExpr+1 : &RetExpr;
+}
+
+// AsmStmt
+Stmt::child_iterator AsmStmt::child_begin() {
+ return Exprs.empty() ? 0 : &Exprs[0];
+}
+Stmt::child_iterator AsmStmt::child_end() {
+ return Exprs.empty() ? 0 : &Exprs[0] + Exprs.size();
+}
+
+// ObjCAtCatchStmt
+Stmt::child_iterator ObjCAtCatchStmt::child_begin() { return &SubExprs[0]; }
+Stmt::child_iterator ObjCAtCatchStmt::child_end() {
+ return &SubExprs[0]+END_EXPR;
+}
+
+// ObjCAtFinallyStmt
+Stmt::child_iterator ObjCAtFinallyStmt::child_begin() { return &AtFinallyStmt; }
+Stmt::child_iterator ObjCAtFinallyStmt::child_end() { return &AtFinallyStmt+1; }
+
+// ObjCAtTryStmt
+Stmt::child_iterator ObjCAtTryStmt::child_begin() { return &SubStmts[0]; }
+Stmt::child_iterator ObjCAtTryStmt::child_end() {
+ return &SubStmts[0]+END_EXPR;
+}
+
+// ObjCAtThrowStmt
+Stmt::child_iterator ObjCAtThrowStmt::child_begin() {
+ return &Throw;
+}
+
+Stmt::child_iterator ObjCAtThrowStmt::child_end() {
+ return &Throw+1;
+}
+
+// ObjCAtSynchronizedStmt
+Stmt::child_iterator ObjCAtSynchronizedStmt::child_begin() {
+ return &SubStmts[0];
+}
+
+Stmt::child_iterator ObjCAtSynchronizedStmt::child_end() {
+ return &SubStmts[0]+END_EXPR;
+}
+
+// CXXCatchStmt
+Stmt::child_iterator CXXCatchStmt::child_begin() {
+ return &HandlerBlock;
+}
+
+Stmt::child_iterator CXXCatchStmt::child_end() {
+ return &HandlerBlock + 1;
+}
+
+QualType CXXCatchStmt::getCaughtType() {
+ if (ExceptionDecl)
+ return ExceptionDecl->getType();
+ return QualType();
+}
+
+void CXXCatchStmt::Destroy(ASTContext& C) {
+ if (ExceptionDecl)
+ ExceptionDecl->Destroy(C);
+ Stmt::Destroy(C);
+}
+
+// CXXTryStmt
+Stmt::child_iterator CXXTryStmt::child_begin() { return &Stmts[0]; }
+Stmt::child_iterator CXXTryStmt::child_end() { return &Stmts[0]+Stmts.size(); }
+
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+ Stmt **handlers, unsigned numHandlers)
+ : Stmt(CXXTryStmtClass), TryLoc(tryLoc) {
+ Stmts.push_back(tryBlock);
+ Stmts.insert(Stmts.end(), handlers, handlers + numHandlers);
+}
diff --git a/lib/AST/StmtDumper.cpp b/lib/AST/StmtDumper.cpp
new file mode 100644
index 0000000..b24e912
--- /dev/null
+++ b/lib/AST/StmtDumper.cpp
@@ -0,0 +1,542 @@
+//===--- StmtDumper.cpp - Dumping implementation for Stmt ASTs ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dump/Stmt::print methods, which dump out the
+// AST in a form that exposes type details and other fields.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtDumper Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN StmtDumper : public StmtVisitor<StmtDumper> {
+ SourceManager *SM;
+ FILE *F;
+ unsigned IndentLevel;
+
+ /// MaxDepth - When doing a normal dump (not dumpAll) we only want to dump
+    /// the first few levels of an AST.  This keeps track of how many AST
+    /// levels are left.
+ unsigned MaxDepth;
+
+ /// LastLocFilename/LastLocLine - Keep track of the last location we print
+ /// out so that we can print out deltas from then on out.
+ const char *LastLocFilename;
+ unsigned LastLocLine;
+
+ PrintingPolicy Policy;
+ public:
+ StmtDumper(SourceManager *sm, FILE *f, unsigned maxDepth)
+ : SM(sm), F(f), IndentLevel(0-1), MaxDepth(maxDepth) {
+ LastLocFilename = "";
+ LastLocLine = ~0U;
+ }
+
+ void DumpSubTree(Stmt *S) {
+ // Prune the recursion if not using dump all.
+ if (MaxDepth == 0) return;
+
+ ++IndentLevel;
+ if (S) {
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
+ VisitDeclStmt(DS);
+ else {
+ Visit(S);
+
+ // Print out children.
+ Stmt::child_iterator CI = S->child_begin(), CE = S->child_end();
+ if (CI != CE) {
+ while (CI != CE) {
+ fprintf(F, "\n");
+ DumpSubTree(*CI++);
+ }
+ }
+ fprintf(F, ")");
+ }
+ } else {
+ Indent();
+ fprintf(F, "<<<NULL>>>");
+ }
+ --IndentLevel;
+ }
+
+ void DumpDeclarator(Decl *D);
+
+ void Indent() const {
+ for (int i = 0, e = IndentLevel; i < e; ++i)
+ fprintf(F, " ");
+ }
+
+ void DumpType(QualType T) {
+ fprintf(F, "'%s'", T.getAsString().c_str());
+
+ if (!T.isNull()) {
+ // If the type is directly a typedef, strip off typedefness to give at
+ // least one level of concreteness.
+ if (TypedefType *TDT = dyn_cast<TypedefType>(T)) {
+ QualType Simplified =
+ TDT->LookThroughTypedefs().getQualifiedType(T.getCVRQualifiers());
+ fprintf(F, ":'%s'", Simplified.getAsString().c_str());
+ }
+ }
+ }
+ void DumpStmt(const Stmt *Node) {
+ Indent();
+ fprintf(F, "(%s %p", Node->getStmtClassName(), (void*)Node);
+ DumpSourceRange(Node);
+ }
+ void DumpExpr(const Expr *Node) {
+ DumpStmt(Node);
+ fprintf(F, " ");
+ DumpType(Node->getType());
+ }
+ void DumpSourceRange(const Stmt *Node);
+ void DumpLocation(SourceLocation Loc);
+
+ // Stmts.
+ void VisitStmt(Stmt *Node);
+ void VisitDeclStmt(DeclStmt *Node);
+ void VisitLabelStmt(LabelStmt *Node);
+ void VisitGotoStmt(GotoStmt *Node);
+
+ // Exprs
+ void VisitExpr(Expr *Node);
+ void VisitDeclRefExpr(DeclRefExpr *Node);
+ void VisitPredefinedExpr(PredefinedExpr *Node);
+ void VisitCharacterLiteral(CharacterLiteral *Node);
+ void VisitIntegerLiteral(IntegerLiteral *Node);
+ void VisitFloatingLiteral(FloatingLiteral *Node);
+ void VisitStringLiteral(StringLiteral *Str);
+ void VisitUnaryOperator(UnaryOperator *Node);
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node);
+ void VisitMemberExpr(MemberExpr *Node);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *Node);
+ void VisitBinaryOperator(BinaryOperator *Node);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
+ void VisitAddrLabelExpr(AddrLabelExpr *Node);
+ void VisitTypesCompatibleExpr(TypesCompatibleExpr *Node);
+
+ // C++
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node);
+ void VisitCXXThisExpr(CXXThisExpr *Node);
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node);
+
+ // ObjC
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *Node);
+ void VisitObjCMessageExpr(ObjCMessageExpr* Node);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
+ void VisitObjCKVCRefExpr(ObjCKVCRefExpr *Node);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+ void VisitObjCSuperExpr(ObjCSuperExpr *Node);
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::DumpLocation(SourceLocation Loc) {
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ if (SpellingLoc.isInvalid()) {
+ fprintf(stderr, "<invalid sloc>");
+ return;
+ }
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ fprintf(stderr, "%s:%u:%u", PLoc.getFilename(), PLoc.getLine(),
+ PLoc.getColumn());
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ fprintf(stderr, "line:%u:%u", PLoc.getLine(), PLoc.getColumn());
+ LastLocLine = PLoc.getLine();
+ } else {
+ fprintf(stderr, "col:%u", PLoc.getColumn());
+ }
+}
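+
+// Illustrative example of the delta printing above: three successive
+// locations in the same file might be printed as
+//   t.c:10:3     (new file)
+//   line:12:5    (same file, new line)
+//   col:9        (same file and line, new column)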
+
+void StmtDumper::DumpSourceRange(const Stmt *Node) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (SM == 0) return;
+
+ // TODO: If the parent expression is available, we can print a delta vs its
+ // location.
+ SourceRange R = Node->getSourceRange();
+
+ fprintf(stderr, " <");
+ DumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ fprintf(stderr, ", ");
+ DumpLocation(R.getEnd());
+ }
+ fprintf(stderr, ">");
+
+ // <t2.c:123:421[blah], t2.c:412:321>
+
+}
+
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitStmt(Stmt *Node) {
+ DumpStmt(Node);
+}
+
+void StmtDumper::DumpDeclarator(Decl *D) {
+ // FIXME: Need to complete/beautify this... this code simply shows the
+ // nodes are where they need to be.
+ if (TypedefDecl *localType = dyn_cast<TypedefDecl>(D)) {
+ fprintf(F, "\"typedef %s %s\"",
+ localType->getUnderlyingType().getAsString().c_str(),
+ localType->getNameAsString().c_str());
+ } else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ fprintf(F, "\"");
+ // Emit storage class for vardecls.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getStorageClass() != VarDecl::None)
+ fprintf(F, "%s ",
+ VarDecl::getStorageClassSpecifierString(V->getStorageClass()));
+ }
+
+ std::string Name = VD->getNameAsString();
+ VD->getType().getAsStringInternal(Name, Policy);
+ fprintf(F, "%s", Name.c_str());
+
+ // If this is a vardecl with an initializer, emit it.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getInit()) {
+ fprintf(F, " =\n");
+ DumpSubTree(V->getInit());
+ }
+ }
+ fprintf(F, "\"");
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+    // Print a free-standing tag decl (e.g. "struct x;").
+ const char *tagname;
+ if (const IdentifierInfo *II = TD->getIdentifier())
+ tagname = II->getName();
+ else
+ tagname = "<anonymous>";
+ fprintf(F, "\"%s %s;\"", TD->getKindName(), tagname);
+ // FIXME: print tag bodies.
+ } else if (UsingDirectiveDecl *UD = dyn_cast<UsingDirectiveDecl>(D)) {
+    // Print a using-directive decl (e.g. "using namespace x;").
+ const char *ns;
+ if (const IdentifierInfo *II = UD->getNominatedNamespace()->getIdentifier())
+ ns = II->getName();
+ else
+ ns = "<anonymous>";
+ fprintf(F, "\"%s %s;\"",UD->getDeclKindName(), ns);
+ } else {
+ assert(0 && "Unexpected decl");
+ }
+}
+
+void StmtDumper::VisitDeclStmt(DeclStmt *Node) {
+ DumpStmt(Node);
+ fprintf(F,"\n");
+ for (DeclStmt::decl_iterator DI = Node->decl_begin(), DE = Node->decl_end();
+ DI != DE; ++DI) {
+ Decl* D = *DI;
+ ++IndentLevel;
+ Indent();
+ fprintf(F, "%p ", (void*) D);
+ DumpDeclarator(D);
+ if (DI+1 != DE)
+ fprintf(F,"\n");
+ --IndentLevel;
+ }
+}
+
+void StmtDumper::VisitLabelStmt(LabelStmt *Node) {
+ DumpStmt(Node);
+ fprintf(F, " '%s'", Node->getName());
+}
+
+void StmtDumper::VisitGotoStmt(GotoStmt *Node) {
+ DumpStmt(Node);
+ fprintf(F, " '%s':%p", Node->getLabel()->getName(), (void*)Node->getLabel());
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitExpr(Expr *Node) {
+ DumpExpr(Node);
+}
+
+void StmtDumper::VisitDeclRefExpr(DeclRefExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " ");
+ switch (Node->getDecl()->getKind()) {
+ case Decl::Function: fprintf(F,"FunctionDecl"); break;
+ case Decl::Var: fprintf(F,"Var"); break;
+ case Decl::ParmVar: fprintf(F,"ParmVar"); break;
+ case Decl::EnumConstant: fprintf(F,"EnumConstant"); break;
+ case Decl::Typedef: fprintf(F,"Typedef"); break;
+ case Decl::Record: fprintf(F,"Record"); break;
+ case Decl::Enum: fprintf(F,"Enum"); break;
+ case Decl::CXXRecord: fprintf(F,"CXXRecord"); break;
+ case Decl::ObjCInterface: fprintf(F,"ObjCInterface"); break;
+ case Decl::ObjCClass: fprintf(F,"ObjCClass"); break;
+ default: fprintf(F,"Decl"); break;
+ }
+
+ fprintf(F, "='%s' %p", Node->getDecl()->getNameAsString().c_str(),
+ (void*)Node->getDecl());
+}
+
+void StmtDumper::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " %sDecl='%s' %p", Node->getDecl()->getDeclKindName(),
+ Node->getDecl()->getNameAsString().c_str(), (void*)Node->getDecl());
+ if (Node->isFreeIvar())
+ fprintf(F, " isFreeIvar");
+}
+
+void StmtDumper::VisitPredefinedExpr(PredefinedExpr *Node) {
+ DumpExpr(Node);
+ switch (Node->getIdentType()) {
+ default: assert(0 && "unknown case");
+ case PredefinedExpr::Func: fprintf(F, " __func__"); break;
+ case PredefinedExpr::Function: fprintf(F, " __FUNCTION__"); break;
+ case PredefinedExpr::PrettyFunction: fprintf(F, " __PRETTY_FUNCTION__");break;
+ }
+}
+
+void StmtDumper::VisitCharacterLiteral(CharacterLiteral *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %d", Node->getValue());
+}
+
+void StmtDumper::VisitIntegerLiteral(IntegerLiteral *Node) {
+ DumpExpr(Node);
+
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ fprintf(F, " %s", Node->getValue().toString(10, isSigned).c_str());
+}
+void StmtDumper::VisitFloatingLiteral(FloatingLiteral *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %f", Node->getValueAsApproximateDouble());
+}
+
+void StmtDumper::VisitStringLiteral(StringLiteral *Str) {
+ DumpExpr(Str);
+ // FIXME: this doesn't print wstrings right.
+ fprintf(F, " %s\"", Str->isWide() ? "L" : "");
+
+ for (unsigned i = 0, e = Str->getByteLength(); i != e; ++i) {
+ switch (char C = Str->getStrData()[i]) {
+ default:
+ if (isprint(C))
+ fputc(C, F);
+ else
+ fprintf(F, "\\%03o", C);
+ break;
+ // Handle some common ones to make dumps prettier.
+ case '\\': fprintf(F, "\\\\"); break;
+ case '"': fprintf(F, "\\\""); break;
+ case '\n': fprintf(F, "\\n"); break;
+ case '\t': fprintf(F, "\\t"); break;
+ case '\a': fprintf(F, "\\a"); break;
+ case '\b': fprintf(F, "\\b"); break;
+ }
+ }
+ fprintf(F, "\"");
+}
+
+void StmtDumper::VisitUnaryOperator(UnaryOperator *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s '%s'", Node->isPostfix() ? "postfix" : "prefix",
+ UnaryOperator::getOpcodeStr(Node->getOpcode()));
+}
+void StmtDumper::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s ", Node->isSizeOf() ? "sizeof" : "alignof");
+ if (Node->isArgumentType())
+ DumpType(Node->getArgumentType());
+}
+
+void StmtDumper::VisitMemberExpr(MemberExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s%s %p", Node->isArrow() ? "->" : ".",
+ Node->getMemberDecl()->getNameAsString().c_str(),
+ (void*)Node->getMemberDecl());
+}
+void StmtDumper::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s", Node->getAccessor().getName());
+}
+void StmtDumper::VisitBinaryOperator(BinaryOperator *Node) {
+ DumpExpr(Node);
+ fprintf(F, " '%s'", BinaryOperator::getOpcodeStr(Node->getOpcode()));
+}
+void StmtDumper::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ DumpExpr(Node);
+ fprintf(F, " '%s' ComputeLHSTy=",
+ BinaryOperator::getOpcodeStr(Node->getOpcode()));
+ DumpType(Node->getComputationLHSType());
+ fprintf(F, " ComputeResultTy=");
+ DumpType(Node->getComputationResultType());
+}
+
+// GNU extensions.
+
+void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s %p", Node->getLabel()->getName(), (void*)Node->getLabel());
+}
+
+void StmtDumper::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " ");
+ DumpType(Node->getArgType1());
+ fprintf(F, " ");
+ DumpType(Node->getArgType2());
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s<%s>", Node->getCastName(),
+ Node->getTypeAsWritten().getAsString().c_str());
+}
+
+void StmtDumper::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " %s", Node->getValue() ? "true" : "false");
+}
+
+void StmtDumper::VisitCXXThisExpr(CXXThisExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " this");
+}
+
+void StmtDumper::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " functional cast to %s",
+ Node->getTypeAsWritten().getAsString().c_str());
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitObjCMessageExpr(ObjCMessageExpr* Node) {
+ DumpExpr(Node);
+ fprintf(F, " selector=%s", Node->getSelector().getAsString().c_str());
+ IdentifierInfo* clsName = Node->getClassName();
+ if (clsName) fprintf(F, " class=%s", clsName->getName());
+}
+
+void StmtDumper::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " ");
+ DumpType(Node->getEncodedType());
+}
+
+void StmtDumper::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " ");
+ fprintf(F, "%s", Node->getSelector().getAsString().c_str());
+}
+
+void StmtDumper::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " ");
+ fprintf(F, "%s", Node->getProtocol()->getNameAsString().c_str());
+}
+
+void StmtDumper::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ DumpExpr(Node);
+
+ fprintf(F, " Kind=PropertyRef Property=\"%s\"",
+ Node->getProperty()->getNameAsString().c_str());
+}
+
+void StmtDumper::VisitObjCKVCRefExpr(ObjCKVCRefExpr *Node) {
+ DumpExpr(Node);
+
+ ObjCMethodDecl *Getter = Node->getGetterMethod();
+ ObjCMethodDecl *Setter = Node->getSetterMethod();
+ fprintf(F, " Kind=MethodRef Getter=\"%s\" Setter=\"%s\"",
+ Getter->getSelector().getAsString().c_str(),
+ Setter ? Setter->getSelector().getAsString().c_str() : "(null)");
+}
+
+void StmtDumper::VisitObjCSuperExpr(ObjCSuperExpr *Node) {
+ DumpExpr(Node);
+ fprintf(F, " super");
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
+void Stmt::dump(SourceManager &SM) const {
+ StmtDumper P(&SM, stderr, 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ fprintf(stderr, "\n");
+}
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
+void Stmt::dump() const {
+ StmtDumper P(0, stderr, 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ fprintf(stderr, "\n");
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll(SourceManager &SM) const {
+ StmtDumper P(&SM, stderr, ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ fprintf(stderr, "\n");
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll() const {
+ StmtDumper P(0, stderr, ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ fprintf(stderr, "\n");
+}
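+
+// Illustrative sketch of the dump format (pointer values elided): dumping the
+// expression '1 + 2' with a SourceManager prints something like
+//   (BinaryOperator 0x... <col:1, col:5> 'int' '+'
+//     (IntegerLiteral 0x... <col:1> 'int' 1)
+//     (IntegerLiteral 0x... <col:5> 'int' 2))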
diff --git a/lib/AST/StmtIterator.cpp b/lib/AST/StmtIterator.cpp
new file mode 100644
index 0000000..5c22e28
--- /dev/null
+++ b/lib/AST/StmtIterator.cpp
@@ -0,0 +1,155 @@
+//===--- StmtIterator.cpp - Iterators for Statements ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines internal methods for StmtIterator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtIterator.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static inline VariableArrayType* FindVA(Type* t) {
+ while (ArrayType* vt = dyn_cast<ArrayType>(t)) {
+ if (VariableArrayType* vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return NULL;
+}
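+
+// For illustration: given `int n; int buf[n][10];`, FindVA applied to the type
+// of `buf` returns the VariableArrayType for the outer `[n]` dimension, the
+// first array level carrying a size expression; `int buf[10][20]` yields NULL
+// because no level is variably sized.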
+
+void StmtIteratorBase::NextVA() {
+ assert (getVAPtr());
+
+ VariableArrayType* p = getVAPtr();
+ p = FindVA(p->getElementType().getTypePtr());
+ setVAPtr(p);
+
+ if (p)
+ return;
+
+ if (inDecl()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else if (inDeclGroup()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else {
+ assert (inSizeOfTypeVA());
+ assert(!decl);
+ RawVAPtr = 0;
+ }
+}
+
+void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
+ assert (getVAPtr() == NULL);
+
+ if (inDecl()) {
+ assert (decl);
+
+ // FIXME: SIMPLIFY AWAY.
+ if (ImmediateAdvance)
+ decl = 0;
+ else if (HandleDecl(decl))
+ return;
+ }
+ else {
+ assert (inDeclGroup());
+
+ if (ImmediateAdvance)
+ ++DGI;
+
+ for ( ; DGI != DGE; ++DGI)
+ if (HandleDecl(*DGI))
+ return;
+ }
+
+ RawVAPtr = 0;
+}
+
+bool StmtIteratorBase::HandleDecl(Decl* D) {
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(D)) {
+ if (VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+
+ if (VD->getInit())
+ return true;
+ }
+ else if (TypedefDecl* TD = dyn_cast<TypedefDecl>(D)) {
+ if (VariableArrayType* VAPtr =
+ FindVA(TD->getUnderlyingType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+ }
+ else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) {
+ if (ECD->getInitExpr())
+ return true;
+ }
+
+ return false;
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl* d)
+ : decl(d), RawVAPtr(DeclMode) {
+ assert (decl);
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
+ : DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) {
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(VariableArrayType* t)
+ : decl(0), RawVAPtr(SizeOfTypeVAMode) {
+ RawVAPtr |= reinterpret_cast<uintptr_t>(t);
+}
+
+Stmt*& StmtIteratorBase::GetDeclExpr() const {
+
+ if (VariableArrayType* VAPtr = getVAPtr()) {
+ assert (VAPtr->SizeExpr);
+ return VAPtr->SizeExpr;
+ }
+
+ assert (inDecl() || inDeclGroup());
+
+ if (inDeclGroup()) {
+ VarDecl* VD = cast<VarDecl>(*DGI);
+ return *VD->getInitAddress();
+ }
+
+ assert (inDecl());
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl)) {
+ assert (VD->Init);
+ return *VD->getInitAddress();
+ }
+
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(decl);
+ return ECD->Init;
+}
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
new file mode 100644
index 0000000..710da63
--- /dev/null
+++ b/lib/AST/StmtPrinter.cpp
@@ -0,0 +1,1239 @@
+//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which
+// pretty print the AST back out to C code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/Format.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtPrinter Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN StmtPrinter : public StmtVisitor<StmtPrinter> {
+ llvm::raw_ostream &OS;
+ ASTContext &Context;
+ unsigned IndentLevel;
+ clang::PrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+ public:
+ StmtPrinter(llvm::raw_ostream &os, ASTContext &C, PrinterHelper* helper,
+ const PrintingPolicy &Policy = PrintingPolicy(),
+ unsigned Indentation = 0)
+ : OS(os), Context(C), IndentLevel(Indentation), Helper(helper),
+ Policy(Policy) {}
+
+ void PrintStmt(Stmt *S) {
+ PrintStmt(S, Policy.Indentation);
+ }
+
+ void PrintStmt(Stmt *S, int SubIndent) {
+ IndentLevel += SubIndent;
+ if (S && isa<Expr>(S)) {
+ // If this is an expr used in a stmt context, indent and newline it.
+ Indent();
+ Visit(S);
+ OS << ";\n";
+ } else if (S) {
+ Visit(S);
+ } else {
+ Indent() << "<<<NULL STATEMENT>>>\n";
+ }
+ IndentLevel -= SubIndent;
+ }
+
+ void PrintRawCompoundStmt(CompoundStmt *S);
+ void PrintRawDecl(Decl *D);
+ void PrintRawDeclStmt(DeclStmt *S);
+ void PrintRawIfStmt(IfStmt *If);
+ void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
+
+ void PrintExpr(Expr *E) {
+ if (E)
+ Visit(E);
+ else
+ OS << "<null expr>";
+ }
+
+ llvm::raw_ostream &Indent(int Delta = 0) {
+ for (int i = 0, e = IndentLevel+Delta; i < e; ++i)
+ OS << " ";
+ return OS;
+ }
+
+ bool PrintOffsetOfDesignator(Expr *E);
+ void VisitUnaryOffsetOf(UnaryOperator *Node);
+
+ void Visit(Stmt* S) {
+ if (Helper && Helper->handledStmt(S,OS))
+ return;
+ else StmtVisitor<StmtPrinter>::Visit(S);
+ }
+
+ void VisitStmt(Stmt *Node);
+#define STMT(CLASS, PARENT) \
+ void Visit##CLASS(CLASS *Node);
+#include "clang/AST/StmtNodes.def"
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtPrinter::VisitStmt(Stmt *Node) {
+ Indent() << "<<unknown stmt type>>\n";
+}
+
+/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
+/// with no newline after the }.
+void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
+ OS << "{\n";
+ for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end();
+ I != E; ++I)
+ PrintStmt(*I);
+
+ Indent() << "}";
+}
+
+void StmtPrinter::PrintRawDecl(Decl *D) {
+ D->print(OS, Context, Policy, IndentLevel);
+}
+
+void StmtPrinter::PrintRawDeclStmt(DeclStmt *S) {
+ DeclStmt::decl_iterator Begin = S->decl_begin(), End = S->decl_end();
+ llvm::SmallVector<Decl*, 2> Decls;
+ for ( ; Begin != End; ++Begin)
+ Decls.push_back(*Begin);
+
+ Decl::printGroup(Decls.data(), Decls.size(), OS, Context, Policy,
+ IndentLevel);
+}
+
+void StmtPrinter::VisitNullStmt(NullStmt *Node) {
+ Indent() << ";\n";
+}
+
+void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
+ Indent();
+ PrintRawDeclStmt(Node);
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
+ Indent();
+ PrintRawCompoundStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
+ Indent(-1) << "case ";
+ PrintExpr(Node->getLHS());
+ if (Node->getRHS()) {
+ OS << " ... ";
+ PrintExpr(Node->getRHS());
+ }
+ OS << ":\n";
+
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
+ Indent(-1) << "default:\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
+ Indent(-1) << Node->getName() << ":\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
+ OS << "if (";
+ PrintExpr(If->getCond());
+ OS << ')';
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << (If->getElse() ? ' ' : '\n');
+ } else {
+ OS << '\n';
+ PrintStmt(If->getThen());
+ if (If->getElse()) Indent();
+ }
+
+ if (Stmt *Else = If->getElse()) {
+ OS << "else";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << '\n';
+ } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
+ OS << ' ';
+ PrintRawIfStmt(ElseIf);
+ } else {
+ OS << '\n';
+ PrintStmt(If->getElse());
+ }
+ }
+}
+
+void StmtPrinter::VisitIfStmt(IfStmt *If) {
+ Indent();
+ PrintRawIfStmt(If);
+}
+
+void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
+ Indent() << "switch (";
+ PrintExpr(Node->getCond());
+ OS << ")";
+
+ // Pretty print CompoundStmt bodies (very common).
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ OS << " ";
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitSwitchCase(SwitchCase*) {
+ assert(0 && "SwitchCase is an abstract class");
+}
+
+void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
+ Indent() << "while (";
+ PrintExpr(Node->getCond());
+ OS << ")\n";
+ PrintStmt(Node->getBody());
+}
+
+void StmtPrinter::VisitDoStmt(DoStmt *Node) {
+ Indent() << "do ";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << " ";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ Indent();
+ }
+
+ OS << "while (";
+ PrintExpr(Node->getCond());
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitForStmt(ForStmt *Node) {
+ Indent() << "for (";
+ if (Node->getInit()) {
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getInit()));
+ }
+ OS << ";";
+ if (Node->getCond()) {
+ OS << " ";
+ PrintExpr(Node->getCond());
+ }
+ OS << ";";
+ if (Node->getInc()) {
+ OS << " ";
+ PrintExpr(Node->getInc());
+ }
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
+ Indent() << "for (";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getElement()));
+ OS << " in ";
+ PrintExpr(Node->getCollection());
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
+ Indent() << "goto " << Node->getLabel()->getName() << ";\n";
+}
+
+void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
+ Indent() << "goto *";
+ PrintExpr(Node->getTarget());
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
+ Indent() << "continue;\n";
+}
+
+void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
+ Indent() << "break;\n";
+}
+
+
+void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
+ Indent() << "return";
+ if (Node->getRetValue()) {
+ OS << " ";
+ PrintExpr(Node->getRetValue());
+ }
+ OS << ";\n";
+}
+
+
+void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
+ Indent() << "asm ";
+
+ if (Node->isVolatile())
+ OS << "volatile ";
+
+ OS << "(";
+ VisitStringLiteral(Node->getAsmString());
+
+ // Outputs
+ if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
+ Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getOutputName(i).empty()) {
+ OS << '[';
+ OS << Node->getOutputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getOutputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getOutputExpr(i));
+ }
+
+ // Inputs
+ if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getInputName(i).empty()) {
+ OS << '[';
+ OS << Node->getInputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getInputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getInputExpr(i));
+ }
+
+ // Clobbers
+ if (Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ VisitStringLiteral(Node->getClobber(i));
+ }
+
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
+ Indent() << "@try";
+ if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
+ PrintRawCompoundStmt(TS);
+ OS << "\n";
+ }
+
+ for (ObjCAtCatchStmt *catchStmt =
+ static_cast<ObjCAtCatchStmt *>(Node->getCatchStmts());
+ catchStmt;
+ catchStmt =
+ static_cast<ObjCAtCatchStmt *>(catchStmt->getNextCatchStmt())) {
+ Indent() << "@catch(";
+ if (catchStmt->getCatchParamDecl()) {
+ if (Decl *DS = catchStmt->getCatchParamDecl())
+ PrintRawDecl(DS);
+ }
+ OS << ")";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody()))
+ {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ }
+ }
+
+ if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
+ Node->getFinallyStmt())) {
+ Indent() << "@finally";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
+ OS << "\n";
+ }
+}
+
+void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
+}
+
+void StmtPrinter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node) {
+ Indent() << "@catch (...) { /* todo */ } \n";
+}
+
+void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
+ Indent() << "@throw";
+ if (Node->getThrowExpr()) {
+ OS << " ";
+ PrintExpr(Node->getThrowExpr());
+ }
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
+ Indent() << "@synchronized (";
+ PrintExpr(Node->getSynchExpr());
+ OS << ")";
+ PrintRawCompoundStmt(Node->getSynchBody());
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
+ OS << "catch (";
+ if (Decl *ExDecl = Node->getExceptionDecl())
+ PrintRawDecl(ExDecl);
+ else
+ OS << "...";
+ OS << ") ";
+ PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock()));
+}
+
+void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
+ Indent();
+ PrintRawCXXCatchStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
+ Indent() << "try ";
+ PrintRawCompoundStmt(Node->getTryBlock());
+ for(unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) {
+ OS << " ";
+ PrintRawCXXCatchStmt(Node->getHandler(i));
+ }
+ OS << "\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtPrinter::VisitExpr(Expr *Node) {
+ OS << "<<unknown expr type>>";
+}
+
+void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
+ OS << Node->getDecl()->getNameAsString();
+}
+
+void StmtPrinter::VisitQualifiedDeclRefExpr(QualifiedDeclRefExpr *Node) {
+ NamedDecl *D = Node->getDecl();
+
+ Node->getQualifier()->print(OS, Policy);
+ OS << D->getNameAsString();
+}
+
+void StmtPrinter::VisitUnresolvedDeclRefExpr(UnresolvedDeclRefExpr *Node) {
+ Node->getQualifier()->print(OS, Policy);
+ OS << Node->getDeclName().getAsString();
+}
+
+void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ OS << Node->getDecl()->getNameAsString();
+}
+
+void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ }
+ OS << Node->getProperty()->getNameAsCString();
+}
+
+void StmtPrinter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ }
+ // FIXME: Setter/Getter names
+}
+
+void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
+ switch (Node->getIdentType()) {
+ default:
+ assert(0 && "unknown case");
+ case PredefinedExpr::Func:
+ OS << "__func__";
+ break;
+ case PredefinedExpr::Function:
+ OS << "__FUNCTION__";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ OS << "__PRETTY_FUNCTION__";
+ break;
+ }
+}
+
+void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
+ unsigned value = Node->getValue();
+ if (Node->isWide())
+ OS << "L";
+ switch (value) {
+ case '\\':
+ OS << "'\\\\'";
+ break;
+ case '\'':
+ OS << "'\\''";
+ break;
+ case '\a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ OS << "'\\a'";
+ break;
+ case '\b':
+ OS << "'\\b'";
+ break;
+ // Nonstandard escape sequence.
+ /*case '\e':
+ OS << "'\\e'";
+ break;*/
+ case '\f':
+ OS << "'\\f'";
+ break;
+ case '\n':
+ OS << "'\\n'";
+ break;
+ case '\r':
+ OS << "'\\r'";
+ break;
+ case '\t':
+ OS << "'\\t'";
+ break;
+ case '\v':
+ OS << "'\\v'";
+ break;
+ default:
+ if (value < 256 && isprint(value)) {
+ OS << "'" << (char)value << "'";
+ } else if (value < 256) {
+ OS << "'\\x" << llvm::format("%x", value) << "'";
+ } else {
+ // FIXME: What should we really do here?
+ OS << value;
+ }
+ }
+}
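+
+// Example (illustrative): a CharacterLiteral holding '\n' prints as '\n', a
+// printable value such as 'a' prints as 'a', and a non-printable value below
+// 256 (e.g. 0x1f) falls through to the hex escape form, '\x1f'.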
+
+void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << Node->getValue().toString(10, isSigned);
+
+ // Emit suffixes. Integer literals are always a builtin integer type.
+ switch (Node->getType()->getAsBuiltinType()->getKind()) {
+ default: assert(0 && "Unexpected type for integer literal!");
+ case BuiltinType::Int: break; // no suffix.
+ case BuiltinType::UInt: OS << 'U'; break;
+ case BuiltinType::Long: OS << 'L'; break;
+ case BuiltinType::ULong: OS << "UL"; break;
+ case BuiltinType::LongLong: OS << "LL"; break;
+ case BuiltinType::ULongLong: OS << "ULL"; break;
+ }
+}
+void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+ // FIXME: print value more precisely.
+ OS << Node->getValueAsApproximateDouble();
+}
+
+void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
+ PrintExpr(Node->getSubExpr());
+ OS << "i";
+}
+
+void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
+ if (Str->isWide()) OS << 'L';
+ OS << '"';
+
+ // FIXME: this doesn't print wstrings right.
+ for (unsigned i = 0, e = Str->getByteLength(); i != e; ++i) {
+ unsigned char Char = Str->getStrData()[i];
+
+ switch (Char) {
+ default:
+ if (isprint(Char))
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ break;
+ // Handle some common non-printable cases to make dumps prettier.
+ case '\\': OS << "\\\\"; break;
+ case '"': OS << "\\\""; break;
+ case '\n': OS << "\\n"; break;
+ case '\t': OS << "\\t"; break;
+ case '\a': OS << "\\a"; break;
+ case '\b': OS << "\\b"; break;
+ }
+ }
+ OS << '"';
+}
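+
+// Example (illustrative): the string literal "a\"b\x01" is printed back as
+// "a\"b\001": the quote uses its dedicated escape and the non-printable byte
+// falls through to the three-digit octal form.
+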
+void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
+ if (!Node->isPostfix()) {
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+
+ // Print a space if this is an "identifier operator" like __real.
+ switch (Node->getOpcode()) {
+ default: break;
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ case UnaryOperator::Extension:
+ OS << ' ';
+ break;
+ }
+ }
+ PrintExpr(Node->getSubExpr());
+
+ if (Node->isPostfix())
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+}
+
+bool StmtPrinter::PrintOffsetOfDesignator(Expr *E) {
+ if (isa<UnaryOperator>(E)) {
+ // Base case, print the type and comma.
+ OS << E->getType().getAsString() << ", ";
+ return true;
+ } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E)) {
+ PrintOffsetOfDesignator(ASE->getLHS());
+ OS << "[";
+ PrintExpr(ASE->getRHS());
+ OS << "]";
+ return false;
+ } else {
+ MemberExpr *ME = cast<MemberExpr>(E);
+ bool IsFirst = PrintOffsetOfDesignator(ME->getBase());
+ OS << (IsFirst ? "" : ".") << ME->getMemberDecl()->getNameAsString();
+ return false;
+ }
+}
+
+void StmtPrinter::VisitUnaryOffsetOf(UnaryOperator *Node) {
+ OS << "__builtin_offsetof(";
+ PrintOffsetOfDesignator(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) {
+ OS << (Node->isSizeOf() ? "sizeof" : "__alignof");
+ if (Node->isArgumentType())
+ OS << "(" << Node->getArgumentType().getAsString() << ")";
+ else {
+ OS << " ";
+ PrintExpr(Node->getArgumentExpr());
+ }
+}
+void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
+ PrintExpr(Node->getLHS());
+ OS << "[";
+ PrintExpr(Node->getRHS());
+ OS << "]";
+}
+
+void StmtPrinter::VisitCallExpr(CallExpr *Call) {
+ PrintExpr(Call->getCallee());
+ OS << "(";
+ for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(Call->getArg(i));
+ }
+ OS << ")";
+}
+void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
+ // FIXME: Suppress printing implicit bases (like "this")
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ // FIXME: Suppress printing references to unnamed objects
+ // representing anonymous unions/structs
+ OS << Node->getMemberDecl()->getNameAsString();
+}
+void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ OS << Node->getAccessor().getName();
+}
+void StmtPrinter::VisitCastExpr(CastExpr *) {
+ assert(0 && "CastExpr is an abstract class");
+}
+void StmtPrinter::VisitExplicitCastExpr(ExplicitCastExpr *) {
+ assert(0 && "ExplicitCastExpr is an abstract class");
+}
+void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
+ OS << "(" << Node->getType().getAsString() << ")";
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
+ OS << "(" << Node->getType().getAsString() << ")";
+ PrintExpr(Node->getInitializer());
+}
+void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
+ // No need to print anything, simply forward to the sub expression.
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
+ PrintExpr(Node->getCond());
+
+ if (Node->getLHS()) {
+ OS << " ? ";
+ PrintExpr(Node->getLHS());
+ OS << " : ";
+ }
+ else { // Handle GCC extension where LHS can be NULL.
+ OS << " ?: ";
+ }
+
+ PrintExpr(Node->getRHS());
+}
+
+// GNU extensions.
+
+void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ OS << "&&" << Node->getLabel()->getName();
+}
+
+void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
+ OS << "(";
+ PrintRawCompoundStmt(E->getSubStmt());
+ OS << ")";
+}
+
+void StmtPrinter::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
+ OS << "__builtin_types_compatible_p(";
+ OS << Node->getArgType1().getAsString() << ",";
+ OS << Node->getArgType2().getAsString() << ")";
+}
+
+void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
+ OS << "__builtin_choose_expr(";
+ PrintExpr(Node->getCond());
+ OS << ", ";
+ PrintExpr(Node->getLHS());
+ OS << ", ";
+ PrintExpr(Node->getRHS());
+ OS << ")";
+}
+
+void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) {
+ OS << "__null";
+}
+
+void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) {
+ OS << "__builtin_shufflevector(";
+ for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
+ if (Node->getSyntacticForm()) {
+ Visit(Node->getSyntacticForm());
+ return;
+ }
+
+ OS << "{ ";
+ for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) {
+ if (i) OS << ", ";
+ if (Node->getInit(i))
+ PrintExpr(Node->getInit(i));
+ else
+ OS << "0";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
+ for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(),
+ DEnd = Node->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (D->getDotLoc().isInvalid())
+ OS << D->getFieldName()->getName() << ":";
+ else
+ OS << "." << D->getFieldName()->getName();
+ } else {
+ OS << "[";
+ if (D->isArrayDesignator()) {
+ PrintExpr(Node->getArrayIndex(*D));
+ } else {
+ PrintExpr(Node->getArrayRangeStart(*D));
+ OS << " ... ";
+ PrintExpr(Node->getArrayRangeEnd(*D));
+ }
+ OS << "]";
+ }
+ }
+
+ OS << " = ";
+ PrintExpr(Node->getInit());
+}
+
+void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) {
+ if (Policy.CPlusPlus)
+ OS << "/*implicit*/" << Node->getType().getAsString(Policy) << "()";
+ else {
+ OS << "/*implicit*/(" << Node->getType().getAsString(Policy) << ")";
+ if (Node->getType()->isRecordType())
+ OS << "{}";
+ else
+ OS << 0;
+ }
+}
+
+void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
+ OS << "__builtin_va_arg(";
+ PrintExpr(Node->getSubExpr());
+ OS << ", ";
+ OS << Node->getType().getAsString();
+ OS << ")";
+}
+
+// C++
+void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
+ const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
+ "",
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ OverloadedOperatorKind Kind = Node->getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind];
+ }
+ } else if (Kind == OO_Call) {
+ PrintExpr(Node->getArg(0));
+ OS << '(';
+ for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
+ if (ArgIdx > 1)
+ OS << ", ";
+ if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
+ PrintExpr(Node->getArg(ArgIdx));
+ }
+ OS << ')';
+ } else if (Kind == OO_Subscript) {
+ PrintExpr(Node->getArg(0));
+ OS << '[';
+ PrintExpr(Node->getArg(1));
+ OS << ']';
+ } else if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else if (Node->getNumArgs() == 2) {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(1));
+ } else {
+ assert(false && "unknown overloaded operator");
+ }
+}
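+
+// Example (illustrative) of the forms produced above: `operator+(a, b)` is
+// printed as `a + b`, `operator[](v, i)` as `v[i]`, `operator()(f, x, y)` as
+// `f(x, y)`, and a one-argument `operator++` as `++ a` (prefix position).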
+
+void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+ VisitCallExpr(cast<CallExpr>(Node));
+}
+
+void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ OS << Node->getCastName() << '<';
+ OS << Node->getTypeAsWritten().getAsString() << ">(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
+ OS << "typeid(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString();
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "true" : "false");
+}
+
+void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) {
+ OS << "nullptr";
+}
+
+void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) {
+ OS << "this";
+}
+
+void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) {
+ if (Node->getSubExpr() == 0)
+ OS << "throw";
+ else {
+ OS << "throw ";
+ PrintExpr(Node->getSubExpr());
+ }
+}
+
+void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
+ // Nothing to print: we picked up the default argument
+}
+
+void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ OS << Node->getType().getAsString();
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
+void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
+ OS << Node->getType().getAsString();
+ OS << "(";
+ for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *Node) {
+ OS << Node->getType().getAsString() << "()";
+}
+
+void
+StmtPrinter::VisitCXXConditionDeclExpr(CXXConditionDeclExpr *E) {
+ PrintRawDecl(E->getVarDecl());
+}
+
+void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->isGlobalNew())
+ OS << "::";
+ OS << "new ";
+ unsigned NumPlace = E->getNumPlacementArgs();
+ if (NumPlace > 0) {
+ OS << "(";
+ PrintExpr(E->getPlacementArg(0));
+ for (unsigned i = 1; i < NumPlace; ++i) {
+ OS << ", ";
+ PrintExpr(E->getPlacementArg(i));
+ }
+ OS << ") ";
+ }
+ if (E->isParenTypeId())
+ OS << "(";
+ std::string TypeS;
+ if (Expr *Size = E->getArraySize()) {
+ llvm::raw_string_ostream s(TypeS);
+ Size->printPretty(s, Context, Helper, Policy);
+ s.flush();
+ TypeS = "[" + TypeS + "]";
+ }
+ E->getAllocatedType().getAsStringInternal(TypeS, Policy);
+ OS << TypeS;
+ if (E->isParenTypeId())
+ OS << ")";
+
+ if (E->hasInitializer()) {
+ OS << "(";
+ unsigned NumCons = E->getNumConstructorArgs();
+ if (NumCons > 0) {
+ PrintExpr(E->getConstructorArg(0));
+ for (unsigned i = 1; i < NumCons; ++i) {
+ OS << ", ";
+ PrintExpr(E->getConstructorArg(i));
+ }
+ }
+ OS << ")";
+ }
+}
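+
+// Example (illustrative): for `::new (p) int(5)` the code above emits the
+// leading `::`, the placement argument list `(p) `, the allocated type, and
+// the constructor arguments, reproducing `::new (p) int(5)`. An array size,
+// if present, is folded into the type string as `[size]`.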
+
+void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->isGlobalDelete())
+ OS << "::";
+ OS << "delete ";
+ if (E->isArrayForm())
+ OS << "[] ";
+ PrintExpr(E->getArgument());
+}
+
+void StmtPrinter::VisitUnresolvedFunctionNameExpr(UnresolvedFunctionNameExpr *E) {
+ OS << E->getName().getAsString();
+}
+
+void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ // Nothing to print.
+}
+
+void StmtPrinter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ // Just forward to the sub expression.
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
+ OS << Node->getTypeAsWritten().getAsString();
+ OS << "(";
+ for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXUnresolvedMemberExpr(CXXUnresolvedMemberExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ OS << Node->getMember().getAsString();
+}
+
+static const char *getTypeTraitName(UnaryTypeTrait UTT) {
+ switch (UTT) {
+ default: assert(false && "Unknown type trait");
+ case UTT_HasNothrowAssign: return "__has_nothrow_assign";
+ case UTT_HasNothrowCopy: return "__has_nothrow_copy";
+ case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
+ case UTT_HasTrivialAssign: return "__has_trivial_assign";
+ case UTT_HasTrivialCopy: return "__has_trivial_copy";
+ case UTT_HasTrivialConstructor: return "__has_trivial_constructor";
+ case UTT_HasTrivialDestructor: return "__has_trivial_destructor";
+ case UTT_HasVirtualDestructor: return "__has_virtual_destructor";
+ case UTT_IsAbstract: return "__is_abstract";
+ case UTT_IsClass: return "__is_class";
+ case UTT_IsEmpty: return "__is_empty";
+ case UTT_IsEnum: return "__is_enum";
+ case UTT_IsPOD: return "__is_pod";
+ case UTT_IsPolymorphic: return "__is_polymorphic";
+ case UTT_IsUnion: return "__is_union";
+ }
+}
+
+void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getQueriedType().getAsString() << ")";
+}
+
+// Obj-C
+
+void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
+ OS << "@";
+ VisitStringLiteral(Node->getString());
+}
+
+void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ OS << "@encode(" << Node->getEncodedType().getAsString() << ')';
+}
+
+void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ OS << "@selector(" << Node->getSelector().getAsString() << ')';
+}
+
+void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ OS << "@protocol(" << Node->getProtocol()->getNameAsString() << ')';
+}
+
+void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
+ OS << "[";
+ Expr *receiver = Mess->getReceiver();
+ if (receiver) PrintExpr(receiver);
+ else OS << Mess->getClassName()->getName();
+ OS << ' ';
+ Selector selector = Mess->getSelector();
+ if (selector.isUnarySelector()) {
+ OS << selector.getIdentifierInfoForSlot(0)->getName();
+ } else {
+ for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
+ if (i < selector.getNumArgs()) {
+ if (i > 0) OS << ' ';
+ if (selector.getIdentifierInfoForSlot(i))
+ OS << selector.getIdentifierInfoForSlot(i)->getName() << ':';
+ else
+ OS << ":";
+ }
+ else OS << ", "; // Handle variadic methods.
+
+ PrintExpr(Mess->getArg(i));
+ }
+ }
+ OS << "]";
+}
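+
+// Example (illustrative): a message send `[obj setX:1 y:2]` round-trips as
+// `[obj setX:1 y:2]`, and a class message with a unary selector prints as
+// `[NSObject alloc]`.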
+
+void StmtPrinter::VisitObjCSuperExpr(ObjCSuperExpr *) {
+ OS << "super";
+}
+
+void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
+ BlockDecl *BD = Node->getBlockDecl();
+ OS << "^";
+
+ const FunctionType *AFT = Node->getFunctionType();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ OS << "()";
+ } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) {
+ OS << '(';
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) OS << ", ";
+ ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+ }
+}
+
+void StmtPrinter::VisitBlockDeclRefExpr(BlockDeclRefExpr *Node) {
+ OS << Node->getDecl()->getNameAsString();
+}
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+void Stmt::dumpPretty(ASTContext& Context) const {
+ printPretty(llvm::errs(), Context, 0, PrintingPolicy());
+}
+
+void Stmt::printPretty(llvm::raw_ostream &OS, ASTContext& Context,
+ PrinterHelper* Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation) const {
+ if (this == 0) {
+ OS << "<NULL>";
+ return;
+ }
+
+ if (Policy.Dump) {
+ dump();
+ return;
+ }
+
+ StmtPrinter P(OS, Context, Helper, Policy, Indentation);
+ P.Visit(const_cast<Stmt*>(this));
+}
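+
+// Example (illustrative) call site: given a Stmt *S and its ASTContext Ctx,
+// pretty-print to stderr with the default policy and no PrinterHelper:
+//
+//   S->printPretty(llvm::errs(), Ctx, 0, PrintingPolicy());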
+
+//===----------------------------------------------------------------------===//
+// PrinterHelper
+//===----------------------------------------------------------------------===//
+
+// Implement virtual destructor.
+PrinterHelper::~PrinterHelper() {}
diff --git a/lib/AST/StmtViz.cpp b/lib/AST/StmtViz.cpp
new file mode 100644
index 0000000..1316d35
--- /dev/null
+++ b/lib/AST/StmtViz.cpp
@@ -0,0 +1,61 @@
+//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Stmt::viewAST, which generates a Graphviz DOT file
+// that depicts the AST and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtGraphTraits.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/GraphWriter.h"
+#include <sstream>
+
+using namespace clang;
+
+void Stmt::viewAST() const {
+#ifndef NDEBUG
+ llvm::ViewGraph(this,"AST");
+#else
+ llvm::cerr << "Stmt::viewAST is only available in debug builds on "
+ << "systems with Graphviz or gv!\n";
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits {
+ static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+
+ if (Node)
+ Out << Node->getStmtClassName();
+ else
+ Out << "<NULL>";
+
+ std::string OutStr = Out.str();
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
new file mode 100644
index 0000000..3613da7
--- /dev/null
+++ b/lib/AST/TemplateName.cpp
@@ -0,0 +1,65 @@
+//===--- TemplateName.cpp - C++ Template Name Representation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TemplateName interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+TemplateDecl *TemplateName::getAsTemplateDecl() const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ return Template;
+
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getTemplateDecl();
+
+ return 0;
+}
+
+bool TemplateName::isDependent() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ // FIXME: We don't yet have a notion of dependent
+ // declarations. When we do, check that. This hack won't last
+ // long!
+ return isa<TemplateTemplateParmDecl>(Template);
+ }
+
+ return true;
+}
+
+void
+TemplateName::print(llvm::raw_ostream &OS, const PrintingPolicy &Policy,
+ bool SuppressNNS) const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ OS << Template->getIdentifier()->getName();
+ else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (!SuppressNNS)
+ QTN->getQualifier()->print(OS, Policy);
+ if (QTN->hasTemplateKeyword())
+ OS << "template ";
+ OS << QTN->getTemplateDecl()->getIdentifier()->getName();
+ } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
+ if (!SuppressNNS)
+ DTN->getQualifier()->print(OS, Policy);
+ OS << "template ";
+ OS << DTN->getName()->getName();
+ }
+}
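+
+// Example (illustrative) of the printed forms: a plain template declaration
+// prints as `vector`, a qualified name as `std::vector` (or
+// `std::template vector` when the template keyword was written), and a
+// dependent name as `T::template apply`.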
+
+void TemplateName::dump() const {
+ PrintingPolicy Policy;
+ Policy.CPlusPlus = true;
+ print(llvm::errs(), Policy);
+}
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
new file mode 100644
index 0000000..b2ee58f
--- /dev/null
+++ b/lib/AST/Type.cpp
@@ -0,0 +1,1658 @@
+//===--- Type.cpp - Type representation and manipulation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+bool QualType::isConstant(ASTContext &Ctx) const {
+ if (isConstQualified())
+ return true;
+
+ if (getTypePtr()->isArrayType())
+ return Ctx.getAsArrayType(*this)->getElementType().isConstant(Ctx);
+
+ return false;
+}
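+
+// Example (illustrative): `const int x;` is constant via the const qualifier,
+// while `const int a[4];` is constant because the array's element type is
+// const-qualified, which the recursion above picks up.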
+
+void Type::Destroy(ASTContext& C) {
+ this->~Type();
+ C.Deallocate(this);
+}
+
+void VariableArrayType::Destroy(ASTContext& C) {
+ if (SizeExpr)
+ SizeExpr->Destroy(C);
+ this->~VariableArrayType();
+ C.Deallocate(this);
+}
+
+void DependentSizedArrayType::Destroy(ASTContext& C) {
+ SizeExpr->Destroy(C);
+ this->~DependentSizedArrayType();
+ C.Deallocate(this);
+}
+
+/// getArrayElementTypeNoTypeQual - If this is an array type, return the
+/// element type of the array, potentially with type qualifiers missing.
+/// This method should never be used when type qualifiers are meaningful.
+const Type *Type::getArrayElementTypeNoTypeQual() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().getTypePtr();
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (ArrayType *AT = dyn_cast<ArrayType>(CanonicalType.getUnqualifiedType()))
+ return AT->getElementType().getTypePtr();
+ return 0;
+ }
+
+ // If this is a typedef for an array type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getDesugaredType())->getElementType().getTypePtr();
+}
+
+/// getDesugaredType - Return the specified type with any "sugar" removed from
+/// the type. This takes off typedefs, typeof's etc. If the outer level of
+/// the type is already concrete, it returns it unmodified. This is similar
+/// to getting the canonical type, but it doesn't remove *all* typedefs. For
+/// example, it returns "T*" as "T*" (not as "int*"), because the pointer is
+/// concrete.
+///
+/// \param ForDisplay When true, the desugaring is provided for
+/// display purposes only. In this case, we apply more heuristics to
+/// decide whether it is worth providing a desugared form of the type
+/// or not.
+QualType QualType::getDesugaredType(bool ForDisplay) const {
+ return getTypePtr()->getDesugaredType(ForDisplay)
+ .getWithAdditionalQualifiers(getCVRQualifiers());
+}
+
+/// getDesugaredType - Return the specified type with any "sugar" removed from
+/// the type. This takes off typedefs, typeof's etc. If the outer level of
+/// the type is already concrete, it returns it unmodified. This is similar
+/// to getting the canonical type, but it doesn't remove *all* typedefs. For
+/// example, it returns "T*" as "T*" (not as "int*"), because the pointer is
+/// concrete.
+///
+/// \param ForDisplay When true, the desugaring is provided for
+/// display purposes only. In this case, we apply more heuristics to
+/// decide whether it is worth providing a desugared form of the type
+/// or not.
+QualType Type::getDesugaredType(bool ForDisplay) const {
+ if (const TypedefType *TDT = dyn_cast<TypedefType>(this))
+ return TDT->LookThroughTypedefs().getDesugaredType();
+ if (const TypeOfExprType *TOE = dyn_cast<TypeOfExprType>(this))
+ return TOE->getUnderlyingExpr()->getType().getDesugaredType();
+ if (const TypeOfType *TOT = dyn_cast<TypeOfType>(this))
+ return TOT->getUnderlyingType().getDesugaredType();
+ if (const TemplateSpecializationType *Spec
+ = dyn_cast<TemplateSpecializationType>(this)) {
+ if (ForDisplay)
+ return QualType(this, 0);
+
+ QualType Canon = Spec->getCanonicalTypeInternal();
+ if (Canon->getAsTemplateSpecializationType())
+ return QualType(this, 0);
+ return Canon->getDesugaredType();
+ }
+ if (const QualifiedNameType *QualName = dyn_cast<QualifiedNameType>(this)) {
+ if (ForDisplay) {
+ // If desugaring the type that the qualified name is referring to
+ // produces something interesting, that's our desugared type.
+ QualType NamedType = QualName->getNamedType().getDesugaredType();
+ if (NamedType != QualName->getNamedType())
+ return NamedType;
+ } else
+ return QualName->getNamedType().getDesugaredType();
+ }
+
+ return QualType(this, 0);
+}
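+
+// Example (illustrative): with `typedef int Length;`, the type `Length`
+// desugars to `int`, while `Length *` is returned unmodified because the
+// outermost pointer is already concrete.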
+
+/// isVoidType - Helper method to determine if this is the 'void' type.
+bool Type::isVoidType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Void;
+ if (const ExtQualType *AS = dyn_cast<ExtQualType>(CanonicalType))
+ return AS->getBaseType()->isVoidType();
+ return false;
+}
+
+bool Type::isObjectType() const {
+ if (isa<FunctionType>(CanonicalType) || isa<ReferenceType>(CanonicalType) ||
+ isa<IncompleteArrayType>(CanonicalType) || isVoidType())
+ return false;
+ if (const ExtQualType *AS = dyn_cast<ExtQualType>(CanonicalType))
+ return AS->getBaseType()->isObjectType();
+ return true;
+}
+
+bool Type::isDerivedType() const {
+ switch (CanonicalType->getTypeClass()) {
+ case ExtQual:
+ return cast<ExtQualType>(CanonicalType)->getBaseType()->isDerivedType();
+ case Pointer:
+ case VariableArray:
+ case ConstantArray:
+ case IncompleteArray:
+ case FunctionProto:
+ case FunctionNoProto:
+ case LValueReference:
+ case RValueReference:
+ case Record:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool Type::isClassType() const {
+ if (const RecordType *RT = getAsRecordType())
+ return RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isStructureType() const {
+ if (const RecordType *RT = getAsRecordType())
+ return RT->getDecl()->isStruct();
+ return false;
+}
+bool Type::isUnionType() const {
+ if (const RecordType *RT = getAsRecordType())
+ return RT->getDecl()->isUnion();
+ return false;
+}
+
+bool Type::isComplexType() const {
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ if (const ExtQualType *AS = dyn_cast<ExtQualType>(CanonicalType))
+ return AS->getBaseType()->isComplexType();
+ return false;
+}
+
+bool Type::isComplexIntegerType() const {
+ // Check for GCC complex integer extension.
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isIntegerType();
+ if (const ExtQualType *AS = dyn_cast<ExtQualType>(CanonicalType))
+ return AS->getBaseType()->isComplexIntegerType();
+ return false;
+}
+
+const ComplexType *Type::getAsComplexIntegerType() const {
+ // Are we directly a complex type?
+ if (const ComplexType *CTy = dyn_cast<ComplexType>(this)) {
+ if (CTy->getElementType()->isIntegerType())
+ return CTy;
+ return 0;
+ }
+
+ // If the canonical form of this type isn't what we want, reject it.
+ if (!isa<ComplexType>(CanonicalType)) {
+ // Look through type qualifiers (e.g. ExtQualType's).
+ if (isa<ComplexType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsComplexIntegerType();
+ return 0;
+ }
+
+ // If this is a typedef for a complex type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ComplexType>(getDesugaredType());
+}
+
+const BuiltinType *Type::getAsBuiltinType() const {
+ // If this is directly a builtin type, return it.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(this))
+ return BTy;
+
+ // If the canonical form of this type isn't a builtin type, reject it.
+ if (!isa<BuiltinType>(CanonicalType)) {
+ // Look through type qualifiers (e.g. ExtQualType's).
+ if (isa<BuiltinType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsBuiltinType();
+ return 0;
+ }
+
+ // If this is a typedef for a builtin type, strip the typedef off without
+ // losing all typedef information.
+ return cast<BuiltinType>(getDesugaredType());
+}
+
+const FunctionType *Type::getAsFunctionType() const {
+ // If this is directly a function type, return it.
+ if (const FunctionType *FTy = dyn_cast<FunctionType>(this))
+ return FTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<FunctionType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<FunctionType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsFunctionType();
+ return 0;
+ }
+
+ // If this is a typedef for a function type, strip the typedef off without
+ // losing all typedef information.
+ return cast<FunctionType>(getDesugaredType());
+}
+
+const FunctionNoProtoType *Type::getAsFunctionNoProtoType() const {
+ return dyn_cast_or_null<FunctionNoProtoType>(getAsFunctionType());
+}
+
+const FunctionProtoType *Type::getAsFunctionProtoType() const {
+ return dyn_cast_or_null<FunctionProtoType>(getAsFunctionType());
+}
+
+
+const PointerType *Type::getAsPointerType() const {
+ // If this is directly a pointer type, return it.
+ if (const PointerType *PTy = dyn_cast<PointerType>(this))
+ return PTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<PointerType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<PointerType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsPointerType();
+ return 0;
+ }
+
+ // If this is a typedef for a pointer type, strip the typedef off without
+ // losing all typedef information.
+ return cast<PointerType>(getDesugaredType());
+}
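+
+// Example (illustrative): with `typedef int *IntPtr;`, calling
+// getAsPointerType() on `IntPtr` strips the typedef via getDesugaredType()
+// and returns the underlying PointerType to int; calling it on `int` returns
+// null because the canonical type is not a pointer.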
+
+const BlockPointerType *Type::getAsBlockPointerType() const {
+ // If this is directly a block pointer type, return it.
+ if (const BlockPointerType *PTy = dyn_cast<BlockPointerType>(this))
+ return PTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<BlockPointerType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<BlockPointerType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsBlockPointerType();
+ return 0;
+ }
+
+ // If this is a typedef for a block pointer type, strip the typedef off
+ // without losing all typedef information.
+ return cast<BlockPointerType>(getDesugaredType());
+}
+
+const ReferenceType *Type::getAsReferenceType() const {
+ // If this is directly a reference type, return it.
+ if (const ReferenceType *RTy = dyn_cast<ReferenceType>(this))
+ return RTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ReferenceType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<ReferenceType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsReferenceType();
+ return 0;
+ }
+
+ // If this is a typedef for a reference type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ReferenceType>(getDesugaredType());
+}
+
+const LValueReferenceType *Type::getAsLValueReferenceType() const {
+ // If this is directly an lvalue reference type, return it.
+ if (const LValueReferenceType *RTy = dyn_cast<LValueReferenceType>(this))
+ return RTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<LValueReferenceType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<LValueReferenceType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsLValueReferenceType();
+ return 0;
+ }
+
+ // If this is a typedef for an lvalue reference type, strip the typedef off
+ // without losing all typedef information.
+ return cast<LValueReferenceType>(getDesugaredType());
+}
+
+const RValueReferenceType *Type::getAsRValueReferenceType() const {
+ // If this is directly an rvalue reference type, return it.
+ if (const RValueReferenceType *RTy = dyn_cast<RValueReferenceType>(this))
+ return RTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<RValueReferenceType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<RValueReferenceType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsRValueReferenceType();
+ return 0;
+ }
+
+ // If this is a typedef for an rvalue reference type, strip the typedef off
+ // without losing all typedef information.
+ return cast<RValueReferenceType>(getDesugaredType());
+}
+
+const MemberPointerType *Type::getAsMemberPointerType() const {
+ // If this is directly a member pointer type, return it.
+ if (const MemberPointerType *MTy = dyn_cast<MemberPointerType>(this))
+ return MTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<MemberPointerType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<MemberPointerType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsMemberPointerType();
+ return 0;
+ }
+
+ // If this is a typedef for a member pointer type, strip the typedef off
+ // without losing all typedef information.
+ return cast<MemberPointerType>(getDesugaredType());
+}
+
+/// isVariablyModifiedType (C99 6.7.5p3) - Return true for variable length
+/// array types and types that contain variable array types in their
+/// declarator.
+bool Type::isVariablyModifiedType() const {
+ // A VLA is a variably modified type.
+ if (isVariableArrayType())
+ return true;
+
+ // An array can contain a variably modified type
+ if (const Type *T = getArrayElementTypeNoTypeQual())
+ return T->isVariablyModifiedType();
+
+ // A pointer can point to a variably modified type.
+ // Also, C++ references and member pointers can point to a variably modified
+ // type, where VLAs appear as an extension to C++, and should be treated
+ // correctly.
+ if (const PointerType *PT = getAsPointerType())
+ return PT->getPointeeType()->isVariablyModifiedType();
+ if (const ReferenceType *RT = getAsReferenceType())
+ return RT->getPointeeType()->isVariablyModifiedType();
+ if (const MemberPointerType *PT = getAsMemberPointerType())
+ return PT->getPointeeType()->isVariablyModifiedType();
+
+ // A function can return a variably modified type
+ // This one isn't completely obvious, but it follows from the
+ // definition in C99 6.7.5p3. Because of this rule, it's
+ // illegal to declare a function returning a variably modified type.
+ if (const FunctionType *FT = getAsFunctionType())
+ return FT->getResultType()->isVariablyModifiedType();
+
+ return false;
+}
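+
+// Example (illustrative): `int a[n]` is variably modified (it is a VLA), and
+// so is `int (*p)[n]`, since the pointee is variably modified; `int a[10]`
+// and `int *p` are not.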
+
+const RecordType *Type::getAsRecordType() const {
+ // If this is directly a record type, return it.
+ if (const RecordType *RTy = dyn_cast<RecordType>(this))
+ return RTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<RecordType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<RecordType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsRecordType();
+ return 0;
+ }
+
+ // If this is a typedef for a record type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getDesugaredType());
+}
+
+const TagType *Type::getAsTagType() const {
+ // If this is directly a tag type, return it.
+ if (const TagType *TagTy = dyn_cast<TagType>(this))
+ return TagTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<TagType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<TagType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsTagType();
+ return 0;
+ }
+
+ // If this is a typedef for a tag type, strip the typedef off without
+ // losing all typedef information.
+ return cast<TagType>(getDesugaredType());
+}
+
+const RecordType *Type::getAsStructureType() const {
+ // If this is directly a structure type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isStruct())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isStruct())
+ return 0;
+
+ // If this is a typedef for a structure type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getDesugaredType());
+ }
+ // Look through type qualifiers
+ if (isa<RecordType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsStructureType();
+ return 0;
+}
+
+const RecordType *Type::getAsUnionType() const {
+ // If this is directly a union type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isUnion())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isUnion())
+ return 0;
+
+ // If this is a typedef for a union type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getDesugaredType());
+ }
+
+ // Look through type qualifiers
+ if (isa<RecordType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsUnionType();
+ return 0;
+}
+
+const EnumType *Type::getAsEnumType() const {
+ // Check the canonicalized unqualified type directly; the more complex
+ // version is unnecessary because there isn't any typedef information
+ // to preserve.
+ return dyn_cast<EnumType>(CanonicalType.getUnqualifiedType());
+}
+
+const ComplexType *Type::getAsComplexType() const {
+ // Are we directly a complex type?
+ if (const ComplexType *CTy = dyn_cast<ComplexType>(this))
+ return CTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ComplexType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<ComplexType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsComplexType();
+ return 0;
+ }
+
+ // If this is a typedef for a complex type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ComplexType>(getDesugaredType());
+}
+
+const VectorType *Type::getAsVectorType() const {
+ // Are we directly a vector type?
+ if (const VectorType *VTy = dyn_cast<VectorType>(this))
+ return VTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<VectorType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<VectorType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsVectorType();
+ return 0;
+ }
+
+ // If this is a typedef for a vector type, strip the typedef off without
+ // losing all typedef information.
+ return cast<VectorType>(getDesugaredType());
+}
+
+const ExtVectorType *Type::getAsExtVectorType() const {
+  // Are we directly an OpenCL vector type?
+ if (const ExtVectorType *VTy = dyn_cast<ExtVectorType>(this))
+ return VTy;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ExtVectorType>(CanonicalType)) {
+ // Look through type qualifiers
+ if (isa<ExtVectorType>(CanonicalType.getUnqualifiedType()))
+ return CanonicalType.getUnqualifiedType()->getAsExtVectorType();
+ return 0;
+ }
+
+ // If this is a typedef for an extended vector type, strip the typedef off
+ // without losing all typedef information.
+ return cast<ExtVectorType>(getDesugaredType());
+}
+
+const ObjCInterfaceType *Type::getAsObjCInterfaceType() const {
+ // There is no sugar for ObjCInterfaceType's, just return the canonical
+ // type pointer if it is the right class. There is no typedef information to
+ // return and these cannot be Address-space qualified.
+ return dyn_cast<ObjCInterfaceType>(CanonicalType.getUnqualifiedType());
+}
+
+const ObjCQualifiedInterfaceType *
+Type::getAsObjCQualifiedInterfaceType() const {
+ // There is no sugar for ObjCQualifiedInterfaceType's, just return the
+ // canonical type pointer if it is the right class.
+ return dyn_cast<ObjCQualifiedInterfaceType>(CanonicalType.getUnqualifiedType());
+}
+
+const ObjCQualifiedIdType *Type::getAsObjCQualifiedIdType() const {
+ // There is no sugar for ObjCQualifiedIdType's, just return the canonical
+ // type pointer if it is the right class.
+ return dyn_cast<ObjCQualifiedIdType>(CanonicalType.getUnqualifiedType());
+}
+
+const TemplateTypeParmType *Type::getAsTemplateTypeParmType() const {
+ // There is no sugar for template type parameters, so just return
+ // the canonical type pointer if it is the right class.
+ // FIXME: can these be address-space qualified?
+ return dyn_cast<TemplateTypeParmType>(CanonicalType);
+}
+
+const TemplateSpecializationType *
+Type::getAsTemplateSpecializationType() const {
+ // There is no sugar for class template specialization types, so
+ // just return the canonical type pointer if it is the right class.
+ return dyn_cast<TemplateSpecializationType>(CanonicalType);
+}
+
+bool Type::isIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
+ return true;
+ if (isa<FixedWidthIntType>(CanonicalType))
+ return true;
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isIntegerType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isIntegerType();
+ return false;
+}
+
+bool Type::isIntegralType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongLong;
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
+ return true; // Complete enum types are integral.
+ // FIXME: In C++, enum types are never integral.
+ if (isa<FixedWidthIntType>(CanonicalType))
+ return true;
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isIntegralType();
+ return false;
+}
+
+bool Type::isEnumeralType() const {
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ return TT->getDecl()->isEnum();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isEnumeralType();
+ return false;
+}
+
+bool Type::isBooleanType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Bool;
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isBooleanType();
+ return false;
+}
+
+bool Type::isCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char_U ||
+ BT->getKind() == BuiltinType::UChar ||
+ BT->getKind() == BuiltinType::Char_S ||
+ BT->getKind() == BuiltinType::SChar;
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isCharType();
+ return false;
+}
+
+bool Type::isWideCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::WChar;
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isWideCharType();
+ return false;
+}
+
+/// isSignedIntegerType - Return true if this is an integer type that is
+/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+/// an enum decl which has a signed representation, or a vector of signed
+/// integer element type.
+bool Type::isSignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::LongLong;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+
+ if (const FixedWidthIntType *FWIT =
+ dyn_cast<FixedWidthIntType>(CanonicalType))
+ return FWIT->isSigned();
+
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isSignedIntegerType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isSignedIntegerType();
+ return false;
+}
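+
+// Note (illustrative): plain 'char' lowers to Char_S or Char_U depending on
+// the target, so isSignedIntegerType() on 'char' is target-dependent; for an
+// enum it follows the signedness of the enum's compatible integer type.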
+
+/// isUnsignedIntegerType - Return true if this is an integer type that is
+/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], an enum
+/// decl which has an unsigned representation, or a vector of unsigned integer
+/// element type.
+bool Type::isUnsignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::ULongLong;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+
+ if (const FixedWidthIntType *FWIT =
+ dyn_cast<FixedWidthIntType>(CanonicalType))
+ return !FWIT->isSigned();
+
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isUnsignedIntegerType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isUnsignedIntegerType();
+ return false;
+}
+
+bool Type::isFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Float &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isFloatingType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isFloatingType();
+ return false;
+}
+
+bool Type::isRealFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Float &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isRealFloatingType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isRealFloatingType();
+ return false;
+}
+
+bool Type::isRealType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ return TT->getDecl()->isEnum() && TT->getDecl()->isDefinition();
+ if (isa<FixedWidthIntType>(CanonicalType))
+ return true;
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isRealType();
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isRealType();
+ return false;
+}
+
+bool Type::isArithmeticType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+    // GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
+ // If a body isn't seen by the time we get here, return false.
+ return ET->getDecl()->isDefinition();
+ if (isa<FixedWidthIntType>(CanonicalType))
+ return true;
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isArithmeticType();
+ return isa<ComplexType>(CanonicalType) || isa<VectorType>(CanonicalType);
+}
+
+bool Type::isScalarType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() != BuiltinType::Void;
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) {
+ // Enums are scalar types, but only if they are defined. Incomplete enums
+ // are not treated as scalar types.
+ if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
+ return true;
+ return false;
+ }
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isScalarType();
+ if (isa<FixedWidthIntType>(CanonicalType))
+ return true;
+ return isa<PointerType>(CanonicalType) ||
+ isa<BlockPointerType>(CanonicalType) ||
+ isa<MemberPointerType>(CanonicalType) ||
+ isa<ComplexType>(CanonicalType) ||
+ isa<ObjCQualifiedIdType>(CanonicalType);
+}
+
+/// \brief Determines whether the type is a C++ aggregate type or C
+/// aggregate or union type.
+///
+/// An aggregate type is an array or a class type (struct, union, or
+/// class) that has no user-declared constructors, no private or
+/// protected non-static data members, no base classes, and no virtual
+/// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type
+/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
+/// includes union types.
+bool Type::isAggregateType() const {
+ if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ return ClassDecl->isAggregate();
+
+ return true;
+ }
+
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isAggregateType();
+ return isa<ArrayType>(CanonicalType);
+}
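+
+// Examples (illustrative): 'struct Point { int x, y; };' and 'int arr[4]' are
+// aggregate types, while a class with a user-declared constructor such as
+// 'class Widget { public: Widget(int); };' is not.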
+
+/// isConstantSizeType - Return true if this is not a variable sized type,
+/// according to the rules of C99 6.7.5p3. It is not legal to call this on
+/// incomplete types or dependent types.
+bool Type::isConstantSizeType() const {
+ if (const ExtQualType *EXTQT = dyn_cast<ExtQualType>(CanonicalType))
+ return EXTQT->getBaseType()->isConstantSizeType();
+ assert(!isIncompleteType() && "This doesn't make sense for incomplete types");
+ assert(!isDependentType() && "This doesn't make sense for dependent types");
+ // The VAT must have a size, as it is known to be complete.
+ return !isa<VariableArrayType>(CanonicalType);
+}
+
+/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
+/// - a type that can describe objects, but which lacks information needed to
+/// determine its size.
+bool Type::isIncompleteType() const {
+ switch (CanonicalType->getTypeClass()) {
+ default: return false;
+ case ExtQual:
+ return cast<ExtQualType>(CanonicalType)->getBaseType()->isIncompleteType();
+ case Builtin:
+ // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
+ // be completed.
+ return isVoidType();
+ case Record:
+ case Enum:
+ // A tagged type (struct/union/enum/class) is incomplete if the decl is a
+ // forward declaration, but not a full definition (C99 6.2.5p22).
+ return !cast<TagType>(CanonicalType)->getDecl()->isDefinition();
+ case IncompleteArray:
+ // An array of unknown size is an incomplete type (C99 6.2.5p22).
+ return true;
+ case ObjCInterface:
+ case ObjCQualifiedInterface:
+ // ObjC interfaces are incomplete if they are @class, not @interface.
+ return cast<ObjCInterfaceType>(this)->getDecl()->isForwardDecl();
+ }
+}
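+
+// Examples (illustrative): 'struct S;' (a forward declaration),
+// 'extern int a[];' (an array of unknown size), 'void', and an Objective-C
+// '@class Foo;' forward declaration are all incomplete types;
+// 'struct S { int x; };' is complete.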
+
+/// isPODType - Return true if this is a plain-old-data type (C++ 3.9p10)
+bool Type::isPODType() const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case.
+ if (isIncompleteType())
+ return false;
+
+ switch (CanonicalType->getTypeClass()) {
+ // Everything not explicitly mentioned is not POD.
+ default: return false;
+ case ExtQual:
+ return cast<ExtQualType>(CanonicalType)->getBaseType()->isPODType();
+ case VariableArray:
+ case ConstantArray:
+ // IncompleteArray is caught by isIncompleteType() above.
+ return cast<ArrayType>(CanonicalType)->getElementType()->isPODType();
+
+ case Builtin:
+ case Complex:
+ case Pointer:
+ case MemberPointer:
+ case Vector:
+ case ExtVector:
+ case ObjCQualifiedId:
+ return true;
+
+ case Enum:
+ return true;
+
+ case Record:
+ if (CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ return ClassDecl->isPOD();
+
+ // C struct/union is POD.
+ return true;
+ }
+}
+
+bool Type::isPromotableIntegerType() const {
+ if (const BuiltinType *BT = getAsBuiltinType())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool Type::isNullPtrType() const {
+ if (const BuiltinType *BT = getAsBuiltinType())
+ return BT->getKind() == BuiltinType::NullPtr;
+ return false;
+}
+
+bool Type::isSpecifierType() const {
+ // Note that this intentionally does not use the canonical type.
+ switch (getTypeClass()) {
+ case Builtin:
+ case Record:
+ case Enum:
+ case Typedef:
+ case Complex:
+ case TypeOfExpr:
+ case TypeOf:
+ case TemplateTypeParm:
+ case TemplateSpecialization:
+ case QualifiedName:
+ case Typename:
+ case ObjCInterface:
+ case ObjCQualifiedInterface:
+ case ObjCQualifiedId:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const char *BuiltinType::getName(bool CPlusPlus) const {
+ switch (getKind()) {
+ default: assert(0 && "Unknown builtin type!");
+ case Void: return "void";
+ case Bool: return CPlusPlus? "bool" : "_Bool";
+ case Char_S: return "char";
+ case Char_U: return "char";
+ case SChar: return "signed char";
+ case Short: return "short";
+ case Int: return "int";
+ case Long: return "long";
+ case LongLong: return "long long";
+ case Int128: return "__int128_t";
+ case UChar: return "unsigned char";
+ case UShort: return "unsigned short";
+ case UInt: return "unsigned int";
+ case ULong: return "unsigned long";
+ case ULongLong: return "unsigned long long";
+ case UInt128: return "__uint128_t";
+ case Float: return "float";
+ case Double: return "double";
+ case LongDouble: return "long double";
+ case WChar: return "wchar_t";
+ case NullPtr: return "nullptr_t";
+ case Overload: return "<overloaded function type>";
+ case Dependent: return "<dependent type>";
+ }
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ arg_type_iterator ArgTys,
+ unsigned NumArgs, bool isVariadic,
+ unsigned TypeQuals, bool hasExceptionSpec,
+ bool anyExceptionSpec, unsigned NumExceptions,
+ exception_iterator Exs) {
+ ID.AddPointer(Result.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i].getAsOpaquePtr());
+ ID.AddInteger(isVariadic);
+ ID.AddInteger(TypeQuals);
+ ID.AddInteger(hasExceptionSpec);
+ if (hasExceptionSpec) {
+ ID.AddInteger(anyExceptionSpec);
+ for(unsigned i = 0; i != NumExceptions; ++i)
+ ID.AddPointer(Exs[i].getAsOpaquePtr());
+ }
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getResultType(), arg_type_begin(), NumArgs, isVariadic(),
+ getTypeQuals(), hasExceptionSpec(), hasAnyExceptionSpec(),
+ getNumExceptions(), exception_begin());
+}
+
+void ObjCQualifiedInterfaceType::Profile(llvm::FoldingSetNodeID &ID,
+ const ObjCInterfaceDecl *Decl,
+ ObjCProtocolDecl **protocols,
+ unsigned NumProtocols) {
+ ID.AddPointer(Decl);
+ for (unsigned i = 0; i != NumProtocols; i++)
+ ID.AddPointer(protocols[i]);
+}
+
+void ObjCQualifiedInterfaceType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDecl(), &Protocols[0], getNumProtocols());
+}
+
+void ObjCQualifiedIdType::Profile(llvm::FoldingSetNodeID &ID,
+ ObjCProtocolDecl **protocols,
+ unsigned NumProtocols) {
+ for (unsigned i = 0; i != NumProtocols; i++)
+ ID.AddPointer(protocols[i]);
+}
+
+void ObjCQualifiedIdType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, &Protocols[0], getNumProtocols());
+}
+
+/// LookThroughTypedefs - Return the ultimate type this typedef corresponds to
+/// potentially looking through *all* consecutive typedefs. This returns the
+/// sum of the type qualifiers, so if you have:
+/// typedef const int A;
+/// typedef volatile A B;
+/// looking through the typedefs for B will give you "const volatile A".
+///
+QualType TypedefType::LookThroughTypedefs() const {
+ // Usually, there is only a single level of typedefs, be fast in that case.
+ QualType FirstType = getDecl()->getUnderlyingType();
+ if (!isa<TypedefType>(FirstType))
+ return FirstType;
+
+ // Otherwise, do the fully general loop.
+ unsigned TypeQuals = 0;
+ const TypedefType *TDT = this;
+ while (1) {
+ QualType CurType = TDT->getDecl()->getUnderlyingType();
+
+
+ /// FIXME:
+ /// FIXME: This is incorrect for ExtQuals!
+ /// FIXME:
+ TypeQuals |= CurType.getCVRQualifiers();
+
+ TDT = dyn_cast<TypedefType>(CurType);
+ if (TDT == 0)
+ return QualType(CurType.getTypePtr(), TypeQuals);
+ }
+}
+
+TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
+ : Type(TypeOfExpr, can, E->isTypeDependent()), TOExpr(E) {
+ assert(!isa<TypedefType>(can) && "Invalid canonical type");
+}
+
+TagType::TagType(TypeClass TC, TagDecl *D, QualType can)
+ : Type(TC, can, D->isDependentType()), decl(D, 0) {}
+
+bool RecordType::classof(const TagType *TT) {
+ return isa<RecordDecl>(TT->getDecl());
+}
+
+bool EnumType::classof(const TagType *TT) {
+ return isa<EnumDecl>(TT->getDecl());
+}
+
+bool
+TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgument *Args, unsigned NumArgs) {
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx) {
+ switch (Args[Idx].getKind()) {
+ case TemplateArgument::Type:
+ if (Args[Idx].getAsType()->isDependentType())
+ return true;
+ break;
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ // Never dependent
+ break;
+
+ case TemplateArgument::Expression:
+ if (Args[Idx].getAsExpr()->isTypeDependent() ||
+ Args[Idx].getAsExpr()->isValueDependent())
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
+TemplateSpecializationType::
+TemplateSpecializationType(TemplateName T, const TemplateArgument *Args,
+ unsigned NumArgs, QualType Canon)
+ : Type(TemplateSpecialization,
+ Canon.isNull()? QualType(this, 0) : Canon,
+ T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)),
+ Template(T), NumArgs(NumArgs)
+{
+ assert((!Canon.isNull() ||
+ T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) &&
+ "No canonical type for non-dependent class template specialization");
+
+ TemplateArgument *TemplateArgs
+ = reinterpret_cast<TemplateArgument *>(this + 1);
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg)
+ new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
+}
+
+void TemplateSpecializationType::Destroy(ASTContext& C) {
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // FIXME: Not all expressions get cloned, so we can't yet perform
+ // this destruction.
+ // if (Expr *E = getArg(Arg).getAsExpr())
+ // E->Destroy(C);
+ }
+}
+
+TemplateSpecializationType::iterator
+TemplateSpecializationType::end() const {
+ return begin() + getNumArgs();
+}
+
+const TemplateArgument &
+TemplateSpecializationType::getArg(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Template argument out of range");
+ return getArgs()[Idx];
+}
+
+void
+TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ T.Profile(ID);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID);
+}
+
+//===----------------------------------------------------------------------===//
+// Type Printing
+//===----------------------------------------------------------------------===//
+
+void QualType::dump(const char *msg) const {
+ PrintingPolicy Policy;
+ std::string R = "identifier";
+ getAsStringInternal(R, Policy);
+ if (msg)
+ fprintf(stderr, "%s: %s\n", msg, R.c_str());
+ else
+ fprintf(stderr, "%s\n", R.c_str());
+}
+void QualType::dump() const {
+ dump("");
+}
+
+void Type::dump() const {
+ std::string S = "identifier";
+ getAsStringInternal(S, PrintingPolicy());
+ fprintf(stderr, "%s\n", S.c_str());
+}
+
+static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
+  // Note: the (NonePrinted + " volatile") expressions below perform pointer
+  // arithmetic on the string literal; while NonePrinted is true (1) the
+  // leading space is skipped, so a space is emitted only between qualifiers,
+  // never before the first one.
+ bool NonePrinted = true;
+ if (TypeQuals & QualType::Const)
+ S += "const", NonePrinted = false;
+ if (TypeQuals & QualType::Volatile)
+ S += (NonePrinted+" volatile"), NonePrinted = false;
+ if (TypeQuals & QualType::Restrict)
+ S += (NonePrinted+" restrict"), NonePrinted = false;
+}
+
+std::string QualType::getAsString() const {
+ std::string S;
+ getAsStringInternal(S, PrintingPolicy());
+ return S;
+}
+
+void
+QualType::getAsStringInternal(std::string &S,
+ const PrintingPolicy &Policy) const {
+ if (isNull()) {
+ S += "NULL TYPE";
+ return;
+ }
+
+ if (Policy.SuppressSpecifiers && getTypePtr()->isSpecifierType())
+ return;
+
+ // Print qualifiers as appropriate.
+ if (unsigned Tq = getCVRQualifiers()) {
+ std::string TQS;
+ AppendTypeQualList(TQS, Tq);
+ if (!S.empty())
+ S = TQS + ' ' + S;
+ else
+ S = TQS;
+ }
+
+ getTypePtr()->getAsStringInternal(S, Policy);
+}
+
+void BuiltinType::getAsStringInternal(std::string &S,
+ const PrintingPolicy &Policy) const {
+ if (S.empty()) {
+ S = getName(Policy.CPlusPlus);
+ } else {
+ // Prefix the basic type, e.g. 'int X'.
+ S = ' ' + S;
+ S = getName(Policy.CPlusPlus) + S;
+ }
+}
+
+void FixedWidthIntType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ // FIXME: Once we get bitwidth attribute, write as
+ // "int __attribute__((bitwidth(x)))".
+ std::string prefix = "__clang_fixedwidth";
+ prefix += llvm::utostr_32(Width);
+ prefix += (char)(Signed ? 'S' : 'U');
+ if (S.empty()) {
+ S = prefix;
+ } else {
+ // Prefix the basic type, e.g. 'int X'.
+ S = prefix + S;
+ }
+}
+
+
+void ComplexType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ ElementType->getAsStringInternal(S, Policy);
+ S = "_Complex " + S;
+}
+
+void ExtQualType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ bool NeedsSpace = false;
+ if (AddressSpace) {
+ S = "__attribute__((address_space("+llvm::utostr_32(AddressSpace)+")))" + S;
+ NeedsSpace = true;
+ }
+ if (GCAttrType != QualType::GCNone) {
+ if (NeedsSpace)
+ S += ' ';
+ S += "__attribute__((objc_gc(";
+ if (GCAttrType == QualType::Weak)
+ S += "weak";
+ else
+ S += "strong";
+ S += ")))";
+ }
+ BaseType->getAsStringInternal(S, Policy);
+}
+
+void PointerType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S = '*' + S;
+
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(getPointeeType()))
+ S = '(' + S + ')';
+
+ getPointeeType().getAsStringInternal(S, Policy);
+}
+
+void BlockPointerType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S = '^' + S;
+ PointeeType.getAsStringInternal(S, Policy);
+}
+
+void LValueReferenceType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S = '&' + S;
+
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(getPointeeType()))
+ S = '(' + S + ')';
+
+ getPointeeType().getAsStringInternal(S, Policy);
+}
+
+void RValueReferenceType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S = "&&" + S;
+
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(getPointeeType()))
+ S = '(' + S + ')';
+
+ getPointeeType().getAsStringInternal(S, Policy);
+}
+
+void MemberPointerType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ std::string C;
+ Class->getAsStringInternal(C, Policy);
+ C += "::*";
+ S = C + S;
+
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(getPointeeType()))
+ S = '(' + S + ')';
+
+ getPointeeType().getAsStringInternal(S, Policy);
+}
+
+void ConstantArrayType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S += '[';
+ S += llvm::utostr(getSize().getZExtValue());
+ S += ']';
+
+ getElementType().getAsStringInternal(S, Policy);
+}
+
+void IncompleteArrayType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S += "[]";
+
+ getElementType().getAsStringInternal(S, Policy);
+}
+
+void VariableArrayType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S += '[';
+
+ if (getIndexTypeQualifier()) {
+ AppendTypeQualList(S, getIndexTypeQualifier());
+ S += ' ';
+ }
+
+ if (getSizeModifier() == Static)
+ S += "static";
+ else if (getSizeModifier() == Star)
+ S += '*';
+
+ if (getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ getElementType().getAsStringInternal(S, Policy);
+}
+
+void DependentSizedArrayType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S += '[';
+
+ if (getIndexTypeQualifier()) {
+ AppendTypeQualList(S, getIndexTypeQualifier());
+ S += ' ';
+ }
+
+ if (getSizeModifier() == Static)
+ S += "static";
+ else if (getSizeModifier() == Star)
+ S += '*';
+
+ if (getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ getElementType().getAsStringInternal(S, Policy);
+}
+
+void VectorType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+  // FIXME: We would prefer to print the byte size directly here, but we have
+  // no way to compute the size of the type at this point.
+  S += " __attribute__((__vector_size__(";
+  S += llvm::utostr_32(NumElements);
+  // The "* sizeof(element)" below converts the element count back to bytes.
+  S += " * sizeof(" + ElementType.getAsString() + "))))";
+ ElementType.getAsStringInternal(S, Policy);
+}
+
+void ExtVectorType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ S += " __attribute__((ext_vector_type(";
+ S += llvm::utostr_32(NumElements);
+ S += ")))";
+ ElementType.getAsStringInternal(S, Policy);
+}
+
+void TypeOfExprType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typeof(e) X'.
+ InnerString = ' ' + InnerString;
+ std::string Str;
+ llvm::raw_string_ostream s(Str);
+ getUnderlyingExpr()->printPretty(s, 0, Policy);
+ InnerString = "typeof " + s.str() + InnerString;
+}
+
+void TypeOfType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typeof(t) X'.
+ InnerString = ' ' + InnerString;
+ std::string Tmp;
+ getUnderlyingType().getAsStringInternal(Tmp, Policy);
+ InnerString = "typeof(" + Tmp + ")" + InnerString;
+}
+
+void FunctionNoProtoType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "()";
+ getResultType().getAsStringInternal(S, Policy);
+}
+
+void FunctionProtoType::getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "(";
+ std::string Tmp;
+ PrintingPolicy ParamPolicy(Policy);
+ ParamPolicy.SuppressSpecifiers = false;
+ for (unsigned i = 0, e = getNumArgs(); i != e; ++i) {
+ if (i) S += ", ";
+ getArgType(i).getAsStringInternal(Tmp, ParamPolicy);
+ S += Tmp;
+ Tmp.clear();
+ }
+
+ if (isVariadic()) {
+ if (getNumArgs())
+ S += ", ";
+ S += "...";
+ } else if (getNumArgs() == 0) {
+ // Do not emit int() if we have a proto, emit 'int(void)'.
+ S += "void";
+ }
+
+ S += ")";
+ getResultType().getAsStringInternal(S, Policy);
+}
+
+
+void TypedefType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ InnerString = ' ' + InnerString;
+ InnerString = getDecl()->getIdentifier()->getName() + InnerString;
+}
+
+void TemplateTypeParmType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'parmname X'.
+ InnerString = ' ' + InnerString;
+
+ if (!Name)
+ InnerString = "type-parameter-" + llvm::utostr_32(Depth) + '-' +
+ llvm::utostr_32(Index) + InnerString;
+ else
+ InnerString = Name->getName() + InnerString;
+}
+
+std::string
+TemplateSpecializationType::PrintTemplateArgumentList(
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy) {
+ std::string SpecString;
+ SpecString += '<';
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (Arg)
+ SpecString += ", ";
+
+ // Print the argument into a string.
+ std::string ArgString;
+ switch (Args[Arg].getKind()) {
+ case TemplateArgument::Type:
+ Args[Arg].getAsType().getAsStringInternal(ArgString, Policy);
+ break;
+
+ case TemplateArgument::Declaration:
+ ArgString = cast<NamedDecl>(Args[Arg].getAsDecl())->getNameAsString();
+ break;
+
+ case TemplateArgument::Integral:
+ ArgString = Args[Arg].getAsIntegral()->toString(10, true);
+ break;
+
+ case TemplateArgument::Expression: {
+ llvm::raw_string_ostream s(ArgString);
+ Args[Arg].getAsExpr()->printPretty(s, 0, Policy);
+ break;
+ }
+ }
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ SpecString += ' ';
+
+ SpecString += ArgString;
+ }
+
+  // If the last character of our string is '>', add another space to keep
+  // the two '>' characters separate tokens. We don't *have* to do this in
+  // C++0x, but it's still good hygiene.
+ if (SpecString[SpecString.size() - 1] == '>')
+ SpecString += ' ';
+
+ SpecString += '>';
+
+ return SpecString;
+}
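+
+// For example (illustrative): the arguments of 'map<int, long>' print as
+// '<int, long>'; a nested argument ending in '>' gets a trailing space so the
+// two '>' tokens stay separate (e.g. 'vector<vector<int> >'), and a leading
+// '::Foo' argument gets a space after '<' to avoid forming the '<:' digraph.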
+
+void
+TemplateSpecializationType::
+getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ std::string SpecString;
+
+ {
+ llvm::raw_string_ostream OS(SpecString);
+ Template.print(OS, Policy);
+ }
+
+ SpecString += PrintTemplateArgumentList(getArgs(), getNumArgs(), Policy);
+ if (InnerString.empty())
+ InnerString.swap(SpecString);
+ else
+ InnerString = SpecString + ' ' + InnerString;
+}
+
+void QualifiedNameType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ NNS->print(OS, Policy);
+ }
+
+ std::string TypeStr;
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTagKind = true;
+ NamedType.getAsStringInternal(TypeStr, InnerPolicy);
+
+ MyString += TypeStr;
+ if (InnerString.empty())
+ InnerString.swap(MyString);
+ else
+ InnerString = MyString + ' ' + InnerString;
+}
+
+void TypenameType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ OS << "typename ";
+ NNS->print(OS, Policy);
+
+ if (const IdentifierInfo *Ident = getIdentifier())
+ OS << Ident->getName();
+ else if (const TemplateSpecializationType *Spec = getTemplateId()) {
+ Spec->getTemplateName().print(OS, Policy, true);
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Spec->getArgs(),
+ Spec->getNumArgs(),
+ Policy);
+ }
+ }
+
+ if (InnerString.empty())
+ InnerString.swap(MyString);
+ else
+ InnerString = MyString + ' ' + InnerString;
+}
+
+void ObjCInterfaceType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ InnerString = ' ' + InnerString;
+ InnerString = getDecl()->getIdentifier()->getName() + InnerString;
+}
+
+void
+ObjCQualifiedInterfaceType::getAsStringInternal(std::string &InnerString,
+ const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ InnerString = ' ' + InnerString;
+ std::string ObjCQIString = getDecl()->getNameAsString();
+ ObjCQIString += '<';
+ bool isFirst = true;
+ for (qual_iterator I = qual_begin(), E = qual_end(); I != E; ++I) {
+ if (isFirst)
+ isFirst = false;
+ else
+ ObjCQIString += ',';
+ ObjCQIString += (*I)->getNameAsString();
+ }
+ ObjCQIString += '>';
+ InnerString = ObjCQIString + InnerString;
+}
+
+void ObjCQualifiedIdType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ InnerString = ' ' + InnerString;
+ std::string ObjCQIString = "id";
+ ObjCQIString += '<';
+ for (qual_iterator I = qual_begin(), E = qual_end(); I != E; ++I) {
+ ObjCQIString += (*I)->getNameAsString();
+ if (I+1 != E)
+ ObjCQIString += ',';
+ }
+ ObjCQIString += '>';
+ InnerString = ObjCQIString + InnerString;
+}
+
+void TagType::getAsStringInternal(std::string &InnerString, const PrintingPolicy &Policy) const {
+ if (Policy.SuppressTag)
+ return;
+
+ if (!InnerString.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ InnerString = ' ' + InnerString;
+
+ const char *Kind = Policy.SuppressTagKind? 0 : getDecl()->getKindName();
+ const char *ID;
+ if (const IdentifierInfo *II = getDecl()->getIdentifier())
+ ID = II->getName();
+ else if (TypedefDecl *Typedef = getDecl()->getTypedefForAnonDecl()) {
+ Kind = 0;
+ assert(Typedef->getIdentifier() && "Typedef without identifier?");
+ ID = Typedef->getIdentifier()->getName();
+ } else
+ ID = "<anonymous>";
+
+ // If this is a class template specialization, print the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(getDecl())) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.getFlatArgumentList(),
+ TemplateArgs.flat_size(),
+ Policy);
+ InnerString = TemplateArgsStr + InnerString;
+ }
+
+ if (Kind) {
+ // Compute the full nested-name-specifier for this type. In C,
+ // this will always be empty.
+ std::string ContextStr;
+ for (DeclContext *DC = getDecl()->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ std::string MyPart;
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (NS->getIdentifier())
+ MyPart = NS->getNameAsString();
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.getFlatArgumentList(),
+ TemplateArgs.flat_size(),
+ Policy);
+ MyPart = Spec->getIdentifier()->getName() + TemplateArgsStr;
+ } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ if (TypedefDecl *Typedef = Tag->getTypedefForAnonDecl())
+ MyPart = Typedef->getIdentifier()->getName();
+ else if (Tag->getIdentifier())
+ MyPart = Tag->getIdentifier()->getName();
+ }
+
+ if (!MyPart.empty())
+ ContextStr = MyPart + "::" + ContextStr;
+ }
+
+ InnerString = std::string(Kind) + " " + ContextStr + ID + InnerString;
+ } else
+ InnerString = ID + InnerString;
+}
diff --git a/lib/Analysis/BasicConstraintManager.cpp b/lib/Analysis/BasicConstraintManager.cpp
new file mode 100644
index 0000000..b272214
--- /dev/null
+++ b/lib/Analysis/BasicConstraintManager.cpp
@@ -0,0 +1,342 @@
+//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of GRState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/GRStateTrait.h"
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+
+namespace { class VISIBILITY_HIDDEN ConstNotEq {}; }
+namespace { class VISIBILITY_HIDDEN ConstEq {}; }
+
+typedef llvm::ImmutableMap<SymbolRef,GRState::IntSetTy> ConstNotEqTy;
+typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
+
+static int ConstEqIndex = 0;
+static int ConstNotEqIndex = 0;
+
+namespace clang {
+template<>
+struct GRStateTrait<ConstNotEq> : public GRStatePartialTrait<ConstNotEqTy> {
+ static inline void* GDMIndex() { return &ConstNotEqIndex; }
+};
+
+template<>
+struct GRStateTrait<ConstEq> : public GRStatePartialTrait<ConstEqTy> {
+ static inline void* GDMIndex() { return &ConstEqIndex; }
+};
+}
+
+namespace {
+// BasicConstraintManager only tracks equality and inequality constraints of
+// constants and integer variables.
+class VISIBILITY_HIDDEN BasicConstraintManager
+ : public SimpleConstraintManager {
+ GRState::IntSetTy::Factory ISetFactory;
+public:
+ BasicConstraintManager(GRStateManager& statemgr)
+ : SimpleConstraintManager(statemgr), ISetFactory(statemgr.getAllocator()) {}
+
+ const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AddEQ(const GRState* St, SymbolRef sym, const llvm::APSInt& V);
+
+ const GRState* AddNE(const GRState* St, SymbolRef sym, const llvm::APSInt& V);
+
+ const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
+ bool isNotEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V)
+ const;
+ bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V)
+ const;
+
+ const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
+
+ void print(const GRState* St, std::ostream& Out,
+ const char* nl, const char *sep);
+};
+
+} // end anonymous namespace
+
+ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& StateMgr)
+{
+ return new BasicConstraintManager(StateMgr);
+}
+
+const GRState*
+BasicConstraintManager::AssumeSymNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+ // First, determine if sym == X, where X != V.
+ if (const llvm::APSInt* X = getSymVal(St, sym)) {
+ isFeasible = (*X != V);
+ return St;
+ }
+
+ // Second, determine if sym != V.
+ if (isNotEqual(St, sym, V)) {
+ isFeasible = true;
+ return St;
+ }
+
+ // If we reach here, sym is not a constant and we don't know if it is != V.
+ // Make that assumption.
+ isFeasible = true;
+ return AddNE(St, sym, V);
+}
+
+const GRState*
+BasicConstraintManager::AssumeSymEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+ // First, determine if sym == X, where X != V.
+ if (const llvm::APSInt* X = getSymVal(St, sym)) {
+ isFeasible = *X == V;
+ return St;
+ }
+
+ // Second, determine if sym != V.
+ if (isNotEqual(St, sym, V)) {
+ isFeasible = false;
+ return St;
+ }
+
+ // If we reach here, sym is not a constant and we don't know if it is == V.
+ // Make that assumption.
+
+ isFeasible = true;
+ return AddEQ(St, sym, V);
+}
+
+// This logic will be handled in another ConstraintManager.
+const GRState*
+BasicConstraintManager::AssumeSymLT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+
+ // Is 'V' the smallest possible value?
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value less than 'V'. This path is infeasible.
+ isFeasible = false;
+ return St;
+ }
+
+ // FIXME: For now have assuming x < y be the same as assuming sym != V;
+ return AssumeSymNE(St, sym, V, isFeasible);
+}
+
+const GRState*
+BasicConstraintManager::AssumeSymGT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+
+ // Is 'V' the largest possible value?
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value greater than 'V'. This path is infeasible.
+ isFeasible = false;
+ return St;
+ }
+
+ // FIXME: For now have assuming x > y be the same as assuming sym != V;
+ return AssumeSymNE(St, sym, V, isFeasible);
+}
+
+const GRState*
+BasicConstraintManager::AssumeSymGE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+
+ // Reject a path if the value of sym is a constant X and !(X >= V).
+ if (const llvm::APSInt* X = getSymVal(St, sym)) {
+ isFeasible = *X >= V;
+ return St;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // maximum integer value.
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ // If we know that sym != V, then this condition is infeasible since
+ // there is no other value greater than V.
+ isFeasible = !isNotEqual(St, sym, V);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym == V' because we cannot have 'sym > V' (no larger values).
+ // Add this constraint.
+ if (isFeasible)
+ return AddEQ(St, sym, V);
+ }
+ else
+ isFeasible = true;
+
+ return St;
+}
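+
+// Worked example for the boundary case above (illustrative): for an unsigned
+// 8-bit symbol 'sym', assuming 'sym >= 255' is infeasible if we already know
+// 'sym != 255'; otherwise the only remaining possibility is 'sym == 255',
+// so that equality is recorded in the state.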
+
+const GRState*
+BasicConstraintManager::AssumeSymLE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible) {
+
+ // Reject a path if the value of sym is a constant X and !(X <= V).
+ if (const llvm::APSInt* X = getSymVal(St, sym)) {
+ isFeasible = *X <= V;
+ return St;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // minimum integer value.
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ // If we know that sym != V, then this condition is infeasible since
+ // there is no other value less than V.
+ isFeasible = !isNotEqual(St, sym, V);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym == V' because we cannot have 'sym < V' (no smaller values).
+ // Add this constraint.
+ if (isFeasible)
+ return AddEQ(St, sym, V);
+ }
+ else
+ isFeasible = true;
+
+ return St;
+}
+
+const GRState* BasicConstraintManager::AddEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V) {
+ // Create a new state with the old binding replaced.
+ GRStateRef state(St, StateMgr);
+ return state.set<ConstEq>(sym, &V);
+}
+
+const GRState* BasicConstraintManager::AddNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V) {
+
+ GRStateRef state(St, StateMgr);
+
+ // First, retrieve the NE-set associated with the given symbol.
+ ConstNotEqTy::data_type* T = state.get<ConstNotEq>(sym);
+ GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
+
+ // Now add V to the NE set.
+ S = ISetFactory.Add(S, &V);
+
+ // Create a new state with the old binding replaced.
+ return state.set<ConstNotEq>(sym, S);
+}
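+
+// For example (illustrative): after AddNE(St, sym, 3) and AddNE(St, sym, 5),
+// the NE-set for 'sym' contains both constants, so isNotEqual() returns true
+// for 3 and for 5 and false for any other value.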
+
+const llvm::APSInt* BasicConstraintManager::getSymVal(const GRState* St,
+ SymbolRef sym) const {
+ const ConstEqTy::data_type* T = St->get<ConstEq>(sym);
+ return T ? *T : NULL;
+}
+
+bool BasicConstraintManager::isNotEqual(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V) const {
+
+ // Retrieve the NE-set associated with the given symbol.
+ const ConstNotEqTy::data_type* T = St->get<ConstNotEq>(sym);
+
+ // See if V is present in the NE-set.
+ return T ? T->contains(&V) : false;
+}
+
+bool BasicConstraintManager::isEqual(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V) const {
+ // Retrieve the EQ-set associated with the given symbol.
+ const ConstEqTy::data_type* T = St->get<ConstEq>(sym);
+ // See if V is present in the EQ-set.
+ return T ? **T == V : false;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is no longer
+/// live, mark it as possibly dead and drop its constraint bindings.
+const GRState*
+BasicConstraintManager::RemoveDeadBindings(const GRState* St,
+ SymbolReaper& SymReaper) {
+
+ GRStateRef state(St, StateMgr);
+ ConstEqTy CE = state.get<ConstEq>();
+ ConstEqTy::Factory& CEFactory = state.get_context<ConstEq>();
+
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym)) CE = CEFactory.Remove(CE, sym);
+ }
+ state = state.set<ConstEq>(CE);
+
+ ConstNotEqTy CNE = state.get<ConstNotEq>();
+ ConstNotEqTy::Factory& CNEFactory = state.get_context<ConstNotEq>();
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym)) CNE = CNEFactory.Remove(CNE, sym);
+ }
+
+ return state.set<ConstNotEq>(CNE);
+}
+
+void BasicConstraintManager::print(const GRState* St, std::ostream& Out,
+ const char* nl, const char *sep) {
+ // Print equality constraints.
+
+ ConstEqTy CE = St->get<ConstEq>();
+
+ if (!CE.isEmpty()) {
+ Out << nl << sep << "'==' constraints:";
+
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
+ Out << nl << " $" << I.getKey();
+ llvm::raw_os_ostream OS(Out);
+ OS << " : " << *I.getData();
+ }
+ }
+
+ // Print != constraints.
+
+ ConstNotEqTy CNE = St->get<ConstNotEq>();
+
+ if (!CNE.isEmpty()) {
+ Out << nl << sep << "'!=' constraints:";
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
+ Out << nl << " $" << I.getKey() << " : ";
+ bool isFirst = true;
+
+ GRState::IntSetTy::iterator J = I.getData().begin(),
+ EJ = I.getData().end();
+
+ for ( ; J != EJ; ++J) {
+ if (isFirst) isFirst = false;
+ else Out << ", ";
+
+ Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
+ }
+ }
+ }
+}
diff --git a/lib/Analysis/BasicObjCFoundationChecks.cpp b/lib/Analysis/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..98e9551
--- /dev/null
+++ b/lib/Analysis/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,492 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BasicObjCFoundationChecks.h"
+
+#include "clang/Analysis/PathSensitive/ExplodedGraph.h"
+#include "clang/Analysis/PathSensitive/GRSimpleAPICheck.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/MemRegion.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+static ObjCInterfaceType* GetReceiverType(ObjCMessageExpr* ME) {
+ Expr* Receiver = ME->getReceiver();
+
+ if (!Receiver)
+ return NULL;
+
+ QualType X = Receiver->getType();
+
+ if (X->isPointerType()) {
+ Type* TP = X.getTypePtr();
+ const PointerType* T = TP->getAsPointerType();
+ return dyn_cast<ObjCInterfaceType>(T->getPointeeType().getTypePtr());
+ }
+
+ // FIXME: Support ObjCQualifiedIdType?
+ return NULL;
+}
+
+static const char* GetReceiverNameType(ObjCMessageExpr* ME) {
+ ObjCInterfaceType* ReceiverType = GetReceiverType(ME);
+ return ReceiverType ? ReceiverType->getDecl()->getIdentifier()->getName()
+ : NULL;
+}
+
+namespace {
+
+class VISIBILITY_HIDDEN APIMisuse : public BugType {
+public:
+ APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+};
+
+class VISIBILITY_HIDDEN BasicObjCFoundationChecks : public GRSimpleAPICheck {
+ APIMisuse *BT;
+ BugReporter& BR;
+ ASTContext &Ctx;
+ GRStateManager* VMgr;
+
+ SVal GetSVal(const GRState* St, Expr* E) { return VMgr->GetSVal(St, E); }
+
+ bool isNSString(ObjCInterfaceType* T, const char* suffix);
+ bool AuditNSString(NodeTy* N, ObjCMessageExpr* ME);
+
+ void Warn(NodeTy* N, Expr* E, const std::string& s);
+ void WarnNilArg(NodeTy* N, Expr* E);
+
+ bool CheckNilArg(NodeTy* N, unsigned Arg);
+
+public:
+ BasicObjCFoundationChecks(ASTContext& ctx, GRStateManager* vmgr,
+ BugReporter& br)
+ : BT(0), BR(br), Ctx(ctx), VMgr(vmgr) {}
+
+ bool Audit(ExplodedNode<GRState>* N, GRStateManager&);
+
+private:
+ void WarnNilArg(NodeTy* N, ObjCMessageExpr* ME, unsigned Arg) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Argument to '" << GetReceiverNameType(ME) << "' method '"
+ << ME->getSelector().getAsString() << "' cannot be nil.";
+
+ // Lazily create the BugType object for NilArg. This will be owned
+ // by the BugReporter object 'BR' once we call BR.EmitWarning.
+ if (!BT) BT = new APIMisuse("nil argument");
+
+ RangedBugReport *R = new RangedBugReport(*BT, os.str().c_str(), N);
+ R->addRange(ME->getArg(Arg)->getSourceRange());
+ BR.EmitReport(R);
+ }
+};
+
+} // end anonymous namespace
+
+
+GRSimpleAPICheck*
+clang::CreateBasicObjCFoundationChecks(ASTContext& Ctx,
+ GRStateManager* VMgr, BugReporter& BR) {
+
+ return new BasicObjCFoundationChecks(Ctx, VMgr, BR);
+}
+
+
+
+bool BasicObjCFoundationChecks::Audit(ExplodedNode<GRState>* N,
+ GRStateManager&) {
+
+ ObjCMessageExpr* ME =
+ cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+ ObjCInterfaceType* ReceiverType = GetReceiverType(ME);
+
+ if (!ReceiverType)
+ return false;
+
+ const char* name = ReceiverType->getDecl()->getIdentifier()->getName();
+
+ if (!name)
+ return false;
+
+ if (name[0] != 'N' || name[1] != 'S')
+ return false;
+
+ name += 2;
+
+ // FIXME: Make all of this faster.
+
+ if (isNSString(ReceiverType, name))
+ return AuditNSString(N, ME);
+
+ return false;
+}
+
+static inline bool isNil(SVal X) {
+ return isa<loc::ConcreteInt>(X);
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+bool BasicObjCFoundationChecks::CheckNilArg(NodeTy* N, unsigned Arg) {
+ ObjCMessageExpr* ME =
+ cast<ObjCMessageExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+
+ Expr * E = ME->getArg(Arg);
+
+ if (isNil(GetSVal(N->getState(), E))) {
+ WarnNilArg(N, ME, Arg);
+ return true;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// NSString checking.
+//===----------------------------------------------------------------------===//
+
+bool BasicObjCFoundationChecks::isNSString(ObjCInterfaceType* T,
+ const char* suffix) {
+
+ return !strcmp("String", suffix) || !strcmp("MutableString", suffix);
+}
+
+bool BasicObjCFoundationChecks::AuditNSString(NodeTy* N,
+ ObjCMessageExpr* ME) {
+
+ Selector S = ME->getSelector();
+
+ if (S.isUnarySelector())
+ return false;
+
+ // FIXME: This is going to be really slow doing these checks with
+ // lexical comparisons.
+
+ std::string name = S.getAsString();
+ assert (!name.empty());
+ const char* cstr = &name[0];
+ unsigned len = name.size();
+
+ switch (len) {
+ default:
+ break;
+ case 8:
+ if (!strcmp(cstr, "compare:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 15:
+ // FIXME: Checking for initWithFormat: will not work in most cases
+ // yet because [NSString alloc] returns id, not NSString*. We will
+ // need support for tracking expected-type information in the analyzer
+ // to find these errors.
+ if (!strcmp(cstr, "initWithFormat:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 16:
+ if (!strcmp(cstr, "compare:options:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 22:
+ if (!strcmp(cstr, "compare:options:range:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 23:
+
+ if (!strcmp(cstr, "caseInsensitiveCompare:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 29:
+ if (!strcmp(cstr, "compare:options:range:locale:"))
+ return CheckNilArg(N, 0);
+
+ break;
+
+ case 37:
+ if (!strcmp(cstr, "componentsSeparatedByCharactersInSet:"))
+ return CheckNilArg(N, 0);
+
+ break;
+ }
+
+ return false;
+}
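+
+// For example (illustrative): a message send such as '[myString compare:nil]'
+// reaches CheckNilArg(N, 0) through the 'compare:' case above and is reported
+// as a nil argument passed to an NSString method.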
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN AuditCFNumberCreate : public GRSimpleAPICheck {
+ APIMisuse* BT;
+
+ // FIXME: Either this should be refactored into GRSimpleAPICheck, or
+ // it should always be passed with a call to Audit. The latter
+ // approach makes this class more stateless.
+ ASTContext& Ctx;
+ IdentifierInfo* II;
+ GRStateManager* VMgr;
+ BugReporter& BR;
+
+ SVal GetSVal(const GRState* St, Expr* E) { return VMgr->GetSVal(St, E); }
+
+public:
+ AuditCFNumberCreate(ASTContext& ctx, GRStateManager* vmgr, BugReporter& br)
+ : BT(0), Ctx(ctx), II(&Ctx.Idents.get("CFNumberCreate")), VMgr(vmgr), BR(br){}
+
+ ~AuditCFNumberCreate() {}
+
+ bool Audit(ExplodedNode<GRState>* N, GRStateManager&);
+
+private:
+ void AddError(const TypedRegion* R, Expr* Ex, ExplodedNode<GRState> *N,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+ kCFNumberSInt8Type = 1,
+ kCFNumberSInt16Type = 2,
+ kCFNumberSInt32Type = 3,
+ kCFNumberSInt64Type = 4,
+ kCFNumberFloat32Type = 5,
+ kCFNumberFloat64Type = 6,
+ kCFNumberCharType = 7,
+ kCFNumberShortType = 8,
+ kCFNumberIntType = 9,
+ kCFNumberLongType = 10,
+ kCFNumberLongLongType = 11,
+ kCFNumberFloatType = 12,
+ kCFNumberDoubleType = 13,
+ kCFNumberCFIndexType = 14,
+ kCFNumberNSIntegerType = 15,
+ kCFNumberCGFloatType = 16
+};
+
+namespace {
+ template<typename T>
+ class Optional {
+ bool IsKnown;
+ T Val;
+ public:
+ Optional() : IsKnown(false), Val(0) {}
+ Optional(const T& val) : IsKnown(true), Val(val) {}
+
+ bool isKnown() const { return IsKnown; }
+
+ const T& getValue() const {
+ assert (isKnown());
+ return Val;
+ }
+
+ operator const T&() const {
+ return getValue();
+ }
+ };
+}
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext& Ctx, uint64_t i) {
+ static unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
+
+  if (i >= kCFNumberSInt8Type && i < kCFNumberCharType)
+ return FixedSize[i-1];
+
+ QualType T;
+
+ switch (i) {
+ case kCFNumberCharType: T = Ctx.CharTy; break;
+ case kCFNumberShortType: T = Ctx.ShortTy; break;
+ case kCFNumberIntType: T = Ctx.IntTy; break;
+ case kCFNumberLongType: T = Ctx.LongTy; break;
+ case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+ case kCFNumberFloatType: T = Ctx.FloatTy; break;
+ case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
+ case kCFNumberCFIndexType:
+ case kCFNumberNSIntegerType:
+ case kCFNumberCGFloatType:
+ // FIXME: We need a way to map from names to Type*.
+ default:
+ return Optional<uint64_t>();
+ }
+
+ return Ctx.getTypeSize(T);
+}
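+
+// Illustrative use of the helpers above (hypothetical caller, not part of
+// this patch):
+//
+//   Optional<uint64_t> Size = GetCFNumberSize(Ctx, kCFNumberIntType);
+//   if (Size.isKnown()) {
+//     uint64_t Bits = Size;  // implicit conversion via operator const T&
+//     // ... compare Bits against the size of the pointed-to integer ...
+//   }
+//
+// For kCFNumberCFIndexType, kCFNumberNSIntegerType and kCFNumberCGFloatType
+// the result stays unknown until the FIXME above is addressed.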
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+ static const char* Names[] = {
+ "kCFNumberSInt8Type",
+ "kCFNumberSInt16Type",
+ "kCFNumberSInt32Type",
+ "kCFNumberSInt64Type",
+ "kCFNumberFloat32Type",
+ "kCFNumberFloat64Type",
+ "kCFNumberCharType",
+ "kCFNumberShortType",
+ "kCFNumberIntType",
+ "kCFNumberLongType",
+ "kCFNumberLongLongType",
+ "kCFNumberFloatType",
+ "kCFNumberDoubleType",
+ "kCFNumberCFIndexType",
+ "kCFNumberNSIntegerType",
+ "kCFNumberCGFloatType"
+ };
+
+ return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+bool AuditCFNumberCreate::Audit(ExplodedNode<GRState>* N, GRStateManager&) {
+ CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+ Expr* Callee = CE->getCallee();
+ SVal CallV = GetSVal(N->getState(), Callee);
+ const FunctionDecl* FD = CallV.getAsFunctionDecl();
+
+ if (!FD || FD->getIdentifier() != II || CE->getNumArgs()!=3)
+ return false;
+
+ // Get the value of the "theType" argument.
+ SVal TheTypeVal = GetSVal(N->getState(), CE->getArg(1));
+
+ // FIXME: We really should allow ranges of valid theType values, and
+ // bifurcate the state appropriately.
+ nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
+
+ if (!V)
+ return false;
+
+ uint64_t NumberKind = V->getValue().getLimitedValue();
+ Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+ // FIXME: In some cases we can emit an error.
+ if (!TargetSize.isKnown())
+ return false;
+
+ // Look at the value of the integer being passed by reference. Essentially
+ // we want to catch cases where the value passed in is not equal to the
+ // size of the type being created.
+ SVal TheValueExpr = GetSVal(N->getState(), CE->getArg(2));
+
+ // FIXME: Eventually we should handle arbitrary locations. We can do this
+ // by having an enhanced memory model that does low-level typing.
+ loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
+
+ if (!LV)
+ return false;
+
+ const TypedRegion* R = dyn_cast<TypedRegion>(LV->getRegion());
+ if (!R) return false;
+
+ while (const TypedViewRegion* ATR = dyn_cast<TypedViewRegion>(R)) {
+ R = dyn_cast<TypedRegion>(ATR->getSuperRegion());
+ if (!R) return false;
+ }
+
+ QualType T = Ctx.getCanonicalType(R->getValueType(Ctx));
+
+ // FIXME: If the pointee isn't an integer type, should we flag a warning?
+ // People can do weird stuff with pointers.
+
+ if (!T->isIntegerType())
+ return false;
+
+ uint64_t SourceSize = Ctx.getTypeSize(T);
+
+  // If the source size matches the target size, there is nothing to flag.
+
+ if (SourceSize == TargetSize)
+ return false;
+
+ AddError(R, CE->getArg(2), N, SourceSize, TargetSize, NumberKind);
+
+ // FIXME: We can actually create an abstract "CFNumber" object that has
+ // the bits initialized to the provided values.
+ return SourceSize < TargetSize;
+}
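+
+// Illustration (hypothetical client code, not part of this patch) of what
+// this audit flags: a 16-bit 'short' used to initialize a CFNumber that is
+// declared to hold a 32-bit value, leaving 16 bits of the CFNumber garbage.
+//
+//   #include <CoreFoundation/CoreFoundation.h>
+//
+//   CFNumberRef makeNumber(void) {
+//     short x = 42;
+//     return CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &x);
+//   }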
+
+void AuditCFNumberCreate::AddError(const TypedRegion* R, Expr* Ex,
+ ExplodedNode<GRState> *N,
+ uint64_t SourceSize, uint64_t TargetSize,
+ uint64_t NumberKind) {
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << (SourceSize == 8 ? "An " : "A ")
+ << SourceSize << " bit integer is used to initialize a CFNumber "
+ "object that represents "
+ << (TargetSize == 8 ? "an " : "a ")
+ << TargetSize << " bit integer. ";
+
+ if (SourceSize < TargetSize)
+ os << (TargetSize - SourceSize)
+ << " bits of the CFNumber value will be garbage." ;
+ else
+ os << (SourceSize - TargetSize)
+ << " bits of the input integer will be lost.";
+
+  // Lazily create the BugType object. This will be owned
+  // by the BugReporter object 'BR' once we call BR.EmitReport.
+ if (!BT) BT = new APIMisuse("Bad use of CFNumberCreate");
+ RangedBugReport *report = new RangedBugReport(*BT, os.str().c_str(), N);
+ report->addRange(Ex->getSourceRange());
+ BR.EmitReport(report);
+}
+
+GRSimpleAPICheck*
+clang::CreateAuditCFNumberCreate(ASTContext& Ctx,
+ GRStateManager* VMgr, BugReporter& BR) {
+ return new AuditCFNumberCreate(Ctx, VMgr, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void clang::RegisterAppleChecks(GRExprEngine& Eng) {
+ ASTContext& Ctx = Eng.getContext();
+ GRStateManager* VMgr = &Eng.getStateManager();
+ BugReporter &BR = Eng.getBugReporter();
+
+ Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, VMgr, BR),
+ Stmt::ObjCMessageExprClass);
+
+ Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, VMgr, BR),
+ Stmt::CallExprClass);
+
+ RegisterNSErrorChecks(BR, Eng);
+}
diff --git a/lib/Analysis/BasicObjCFoundationChecks.h b/lib/Analysis/BasicObjCFoundationChecks.h
new file mode 100644
index 0000000..6c594ea
--- /dev/null
+++ b/lib/Analysis/BasicObjCFoundationChecks.h
@@ -0,0 +1,47 @@
+//== BasicObjCFoundationChecks.h - Simple Apple-Foundation checks -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/ExplodedGraph.h"
+#include "clang/Analysis/PathSensitive/GRSimpleAPICheck.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/Compiler.h"
+
+#ifndef LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
+#define LLVM_CLANG_ANALYSIS_BASICOBJCFOUNDATIONCHECKS
+
+namespace clang {
+
+class GRSimpleAPICheck;
+class ASTContext;
+class GRStateManager;
+class BugReporter;
+class GRExprEngine;
+
+GRSimpleAPICheck* CreateBasicObjCFoundationChecks(ASTContext& Ctx,
+ GRStateManager* VMgr,
+ BugReporter& BR);
+
+GRSimpleAPICheck* CreateAuditCFNumberCreate(ASTContext& Ctx,
+ GRStateManager* VMgr,
+ BugReporter& BR);
+
+void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng);
+
+} // end clang namespace
+
+#endif
diff --git a/lib/Analysis/BasicStore.cpp b/lib/Analysis/BasicStore.cpp
new file mode 100644
index 0000000..2dd46c3
--- /dev/null
+++ b/lib/Analysis/BasicStore.cpp
@@ -0,0 +1,637 @@
+//== BasicStore.cpp - Basic map from Locations to Values --------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the BasicStore and BasicStoreManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Streams.h"
+
+using namespace clang;
+
+typedef llvm::ImmutableMap<const MemRegion*,SVal> BindingsTy;
+
+namespace {
+
+class VISIBILITY_HIDDEN BasicStoreSubRegionMap : public SubRegionMap {
+public:
+ BasicStoreSubRegionMap() {}
+
+ bool iterSubRegions(const MemRegion* R, Visitor& V) const {
+ return true; // Do nothing. No subregions.
+ }
+};
+
+class VISIBILITY_HIDDEN BasicStoreManager : public StoreManager {
+ BindingsTy::Factory VBFactory;
+ const MemRegion* SelfRegion;
+
+public:
+ BasicStoreManager(GRStateManager& mgr)
+ : StoreManager(mgr),
+ VBFactory(mgr.getAllocator()),
+ SelfRegion(0) {}
+
+ ~BasicStoreManager() {}
+
+ SubRegionMap* getSubRegionMap(const GRState *state) {
+ return new BasicStoreSubRegionMap();
+ }
+
+ SVal Retrieve(const GRState *state, Loc loc, QualType T = QualType());
+
+ const GRState* Bind(const GRState* St, Loc L, SVal V) {
+ Store store = BindInternal(St->getStore(), L, V);
+ return StateMgr.MakeStateWithStore(St, store);
+ }
+
+ Store scanForIvars(Stmt *B, const Decl* SelfDecl, Store St);
+
+ Store BindInternal(Store St, Loc loc, SVal V);
+ Store Remove(Store St, Loc loc);
+ Store getInitialStore();
+
+ // FIXME: Investigate what is using this. This method should be removed.
+ virtual Loc getLoc(const VarDecl* VD) {
+ return Loc::MakeVal(MRMgr.getVarRegion(VD));
+ }
+
+ const GRState* BindCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL,
+ SVal V) {
+ return St;
+ }
+
+ SVal getLValueVar(const GRState* St, const VarDecl* VD);
+ SVal getLValueString(const GRState* St, const StringLiteral* S);
+ SVal getLValueCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL);
+ SVal getLValueIvar(const GRState* St, const ObjCIvarDecl* D, SVal Base);
+ SVal getLValueField(const GRState* St, SVal Base, const FieldDecl* D);
+ SVal getLValueElement(const GRState* St, QualType elementType,
+ SVal Base, SVal Offset);
+
+  /// ArrayToPointer - Used by GRExprEngine::VisitCast to handle implicit
+ /// conversions between arrays and pointers.
+ SVal ArrayToPointer(Loc Array) { return Array; }
+
+  /// getSelfRegion - Returns the region for the 'self' (Objective-C) or
+  ///  'this' object (C++). When analyzing a normal function, this method
+  ///  returns NULL.
+ const MemRegion* getSelfRegion(Store) { return SelfRegion; }
+
+  /// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
+  ///  It returns a new Store with these values removed, and uses the
+  ///  SymbolReaper to mark which symbols are live and which may be dead.
+ Store
+ RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+
+ void iterBindings(Store store, BindingsHandler& f);
+
+ const GRState* BindDecl(const GRState* St, const VarDecl* VD, SVal InitVal) {
+ Store store = BindDeclInternal(St->getStore(), VD, &InitVal);
+ return StateMgr.MakeStateWithStore(St, store);
+ }
+
+ const GRState* BindDeclWithNoInit(const GRState* St, const VarDecl* VD) {
+ Store store = BindDeclInternal(St->getStore(), VD, 0);
+ return StateMgr.MakeStateWithStore(St, store);
+ }
+
+ Store BindDeclInternal(Store store, const VarDecl* VD, SVal* InitVal);
+
+ static inline BindingsTy GetBindings(Store store) {
+ return BindingsTy(static_cast<const BindingsTy::TreeTy*>(store));
+ }
+
+ void print(Store store, std::ostream& Out, const char* nl, const char *sep);
+
+private:
+ ASTContext& getContext() { return StateMgr.getContext(); }
+};
+
+} // end anonymous namespace
+
+
+StoreManager* clang::CreateBasicStoreManager(GRStateManager& StMgr) {
+ return new BasicStoreManager(StMgr);
+}
+
+SVal BasicStoreManager::getLValueVar(const GRState* St, const VarDecl* VD) {
+ return Loc::MakeVal(MRMgr.getVarRegion(VD));
+}
+
+SVal BasicStoreManager::getLValueString(const GRState* St,
+ const StringLiteral* S) {
+ return Loc::MakeVal(MRMgr.getStringRegion(S));
+}
+
+SVal BasicStoreManager::getLValueCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL){
+ return Loc::MakeVal(MRMgr.getCompoundLiteralRegion(CL));
+}
+
+SVal BasicStoreManager::getLValueIvar(const GRState* St, const ObjCIvarDecl* D,
+ SVal Base) {
+
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+
+ if (isa<loc::MemRegionVal>(BaseL)) {
+ const MemRegion *BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+
+ if (BaseR == SelfRegion)
+ return loc::MemRegionVal(MRMgr.getObjCIvarRegion(D, BaseR));
+ }
+
+ return UnknownVal();
+}
+
+SVal BasicStoreManager::getLValueField(const GRState* St, SVal Base,
+ const FieldDecl* D) {
+
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+ const MemRegion* BaseR = 0;
+
+ switch(BaseL.getSubKind()) {
+ case loc::GotoLabelKind:
+ return UndefinedVal();
+
+ case loc::MemRegionKind:
+ BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+ break;
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ assert ("Unhandled Base.");
+ return Base;
+ }
+
+ return Loc::MakeVal(MRMgr.getFieldRegion(D, BaseR));
+}
+
+SVal BasicStoreManager::getLValueElement(const GRState* St,
+ QualType elementType,
+ SVal Base, SVal Offset) {
+
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+ const TypedRegion* BaseR = 0;
+
+ switch(BaseL.getSubKind()) {
+ case loc::GotoLabelKind:
+ // Technically we can get here if people do funny things with casts.
+ return UndefinedVal();
+
+ case loc::MemRegionKind: {
+ const MemRegion *R = cast<loc::MemRegionVal>(BaseL).getRegion();
+
+ if (isa<ElementRegion>(R)) {
+ // int x;
+ // char* y = (char*) &x;
+ // 'y' => ElementRegion(0, VarRegion('x'))
+ // y[0] = 'a';
+ return Base;
+ }
+
+
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ BaseR = TR;
+ break;
+ }
+
+ if (const SymbolicRegion* SR = dyn_cast<SymbolicRegion>(R)) {
+ SymbolRef Sym = SR->getSymbol();
+ BaseR = MRMgr.getTypedViewRegion(Sym->getType(getContext()), SR);
+ }
+
+ break;
+ }
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ assert ("Unhandled Base.");
+ return Base;
+ }
+
+ if (BaseR)
+ return Loc::MakeVal(MRMgr.getElementRegion(elementType, UnknownVal(),
+ BaseR));
+ else
+ return UnknownVal();
+}
+
+static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
+ bool foundPointer = false;
+ while (1) {
+ const PointerType *PT = T->getAsPointerType();
+ if (!PT) {
+ if (!foundPointer)
+ return false;
+
+ // intptr_t* or intptr_t**, etc?
+ if (T->isIntegerType() && C.getTypeSize(T) == C.getTypeSize(C.VoidPtrTy))
+ return true;
+
+ QualType X = C.getCanonicalType(T).getUnqualifiedType();
+ return X == C.VoidTy;
+ }
+
+ foundPointer = true;
+ T = PT->getPointeeType();
+ }
+}
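+
+// Illustrative behavior of isHigherOrderRawPtr (hypothetical checks, not part
+// of this patch; assumes an LP64 target where 'long' is pointer-sized):
+//
+//   isHigherOrderRawPtr(C.VoidPtrTy, C)                     // true:  void*
+//   isHigherOrderRawPtr(C.getPointerType(C.VoidPtrTy), C)   // true:  void**
+//   isHigherOrderRawPtr(C.getPointerType(C.LongTy), C)      // true:  long*
+//   isHigherOrderRawPtr(C.IntTy, C)                         // false: no pointer
+//   isHigherOrderRawPtr(C.getPointerType(C.CharTy), C)      // false: char* too narrow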
+
+SVal BasicStoreManager::Retrieve(const GRState* state, Loc loc, QualType T) {
+
+ if (isa<UnknownVal>(loc))
+ return UnknownVal();
+
+ assert (!isa<UndefinedVal>(loc));
+
+ switch (loc.getSubKind()) {
+
+ case loc::MemRegionKind: {
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Just support void**, void***, intptr_t*, intptr_t**, etc., for now.
+ // This is needed to handle OSCompareAndSwapPtr() and friends.
+ ASTContext &Ctx = StateMgr.getContext();
+ QualType T = ER->getLocationType(Ctx);
+
+ if (!isHigherOrderRawPtr(T, Ctx))
+ return UnknownVal();
+
+ // FIXME: Should check for element 0.
+ // Otherwise, strip the element region.
+ R = ER->getSuperRegion();
+ }
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return UnknownVal();
+
+ BindingsTy B = GetBindings(state->getStore());
+    BindingsTy::data_type* V = B.lookup(R);
+    return V ? *V : UnknownVal();
+ }
+
+ case loc::ConcreteIntKind:
+    // Some clients may call GetSVal on such a location simply because
+ // they are doing a quick scan through their Locs (potentially to
+ // invalidate their bindings). Just return Undefined.
+ return UndefinedVal();
+
+ default:
+ assert (false && "Invalid Loc.");
+ break;
+ }
+
+ return UnknownVal();
+}
+
+Store BasicStoreManager::BindInternal(Store store, Loc loc, SVal V) {
+ switch (loc.getSubKind()) {
+ case loc::MemRegionKind: {
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+ ASTContext &C = StateMgr.getContext();
+
+ // Special case: handle store of pointer values (Loc) to pointers via
+ // a cast to intXX_t*, void*, etc. This is needed to handle
+ // OSCompareAndSwap32Barrier/OSCompareAndSwap64Barrier.
+ if (isa<Loc>(V) || isa<nonloc::LocAsInteger>(V))
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: Should check for index 0.
+ QualType T = ER->getLocationType(C);
+
+ if (isHigherOrderRawPtr(T, C))
+ R = ER->getSuperRegion();
+ }
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return store;
+
+ // We only track bindings to self.ivar.
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R))
+ if (IVR->getSuperRegion() != SelfRegion)
+ return store;
+
+ if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&V)) {
+ // Only convert 'V' to a location iff the underlying region type
+ // is a location as well.
+ // FIXME: We are allowing a store of an arbitrary location to
+ // a pointer. We may wish to flag a type error here if the types
+ // are incompatible. This may also cause lots of breakage
+ // elsewhere. Food for thought.
+ if (const TypedRegion *TyR = dyn_cast<TypedRegion>(R)) {
+ if (TyR->isBoundable(C) &&
+ Loc::IsLocType(TyR->getValueType(C)))
+ V = X->getLoc();
+ }
+ }
+
+ BindingsTy B = GetBindings(store);
+ return V.isUnknown()
+ ? VBFactory.Remove(B, R).getRoot()
+ : VBFactory.Add(B, R, V).getRoot();
+ }
+ default:
+ assert ("SetSVal for given Loc type not yet implemented.");
+ return store;
+ }
+}
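+
+// Illustrative caller pattern behind the ElementRegion special case above
+// (hypothetical code, not part of this patch): a pointer variable updated
+// through a cast to a pointer-sized integer pointer on an LP64 target.
+//
+//   #include <libkern/OSAtomic.h>
+//   #include <stdint.h>
+//
+//   void *gPtr;
+//   bool update(void *oldV, void *newV) {
+//     return OSCompareAndSwap64Barrier((int64_t) oldV, (int64_t) newV,
+//                                      (int64_t*) &gPtr);
+//   }
+//
+// The cast makes the bind target an ElementRegion over the VarRegion for
+// 'gPtr'; stripping it keeps the binding attached to 'gPtr' itself.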
+
+Store BasicStoreManager::Remove(Store store, Loc loc) {
+ switch (loc.getSubKind()) {
+ case loc::MemRegionKind: {
+ const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ if (!(isa<VarRegion>(R) || isa<ObjCIvarRegion>(R)))
+ return store;
+
+ return VBFactory.Remove(GetBindings(store), R).getRoot();
+ }
+ default:
+ assert ("Remove for given Loc type not yet implemented.");
+ return store;
+ }
+}
+
+Store
+BasicStoreManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+
+ Store store = state->getStore();
+ BindingsTy B = GetBindings(store);
+ typedef SVal::symbol_iterator symbol_iterator;
+
+ // Iterate over the variable bindings.
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
+ if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
+ if (SymReaper.isLive(Loc, VR->getDecl()))
+ RegionRoots.push_back(VR);
+ else
+ continue;
+ }
+ else if (isa<ObjCIvarRegion>(I.getKey())) {
+ RegionRoots.push_back(I.getKey());
+ }
+ else
+ continue;
+
+ // Mark the bindings in the data as live.
+ SVal X = I.getData();
+ for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
+ SymReaper.markLive(*SI);
+ }
+
+ // Scan for live variables and live symbols.
+ llvm::SmallPtrSet<const MemRegion*, 10> Marked;
+
+ while (!RegionRoots.empty()) {
+ const MemRegion* MR = RegionRoots.back();
+ RegionRoots.pop_back();
+
+ while (MR) {
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(MR)) {
+ SymReaper.markLive(SymR->getSymbol());
+ break;
+ }
+ else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
+ if (Marked.count(MR))
+ break;
+
+ Marked.insert(MR);
+ SVal X = Retrieve(state, loc::MemRegionVal(MR));
+
+ // FIXME: We need to handle symbols nested in region definitions.
+ for (symbol_iterator SI=X.symbol_begin(),SE=X.symbol_end();SI!=SE;++SI)
+ SymReaper.markLive(*SI);
+
+ if (!isa<loc::MemRegionVal>(X))
+ break;
+
+ const loc::MemRegionVal& LVD = cast<loc::MemRegionVal>(X);
+ RegionRoots.push_back(LVD.getRegion());
+ break;
+ }
+ else if (const SubRegion* R = dyn_cast<SubRegion>(MR))
+ MR = R->getSuperRegion();
+ else
+ break;
+ }
+ }
+
+ // Remove dead variable bindings.
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
+ const MemRegion* R = I.getKey();
+
+ if (!Marked.count(R)) {
+ store = Remove(store, Loc::MakeVal(R));
+ SVal X = I.getData();
+
+ for (symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end(); SI!=SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+ }
+
+ return store;
+}
+
+Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl, Store St) {
+ for (Stmt::child_iterator CI=B->child_begin(), CE=B->child_end();
+ CI != CE; ++CI) {
+
+ if (!*CI)
+ continue;
+
+ // Check if the statement is an ivar reference. We only
+ // care about self.ivar.
+ if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(*CI)) {
+ const Expr *Base = IV->getBase()->IgnoreParenCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) {
+ if (DR->getDecl() == SelfDecl) {
+ const MemRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
+ SelfRegion);
+ SVal X = ValMgr.getRegionValueSymbolVal(IVR);
+ St = BindInternal(St, Loc::MakeVal(IVR), X);
+ }
+ }
+ }
+ else
+ St = scanForIvars(*CI, SelfDecl, St);
+ }
+
+ return St;
+}
+
+Store BasicStoreManager::getInitialStore() {
+  // The LiveVariables information already has a collection of all VarDecls
+ // used in the function. Iterate through this set, and "symbolicate"
+ // any VarDecl whose value originally comes from outside the function.
+ typedef LiveVariables::AnalysisDataTy LVDataTy;
+ LVDataTy& D = StateMgr.getLiveVariables().getAnalysisData();
+ Store St = VBFactory.GetEmptyMap().getRoot();
+
+ for (LVDataTy::decl_iterator I=D.begin_decl(), E=D.end_decl(); I != E; ++I) {
+ NamedDecl* ND = const_cast<NamedDecl*>(I->first);
+
+ // Handle implicit parameters.
+ if (ImplicitParamDecl* PD = dyn_cast<ImplicitParamDecl>(ND)) {
+ const Decl& CD = StateMgr.getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CD)) {
+ if (MD->getSelfDecl() == PD) {
+ // Create a region for "self".
+ assert (SelfRegion == 0);
+ SelfRegion = MRMgr.getObjCObjectRegion(MD->getClassInterface(),
+ MRMgr.getHeapRegion());
+
+ St = BindInternal(St, Loc::MakeVal(MRMgr.getVarRegion(PD)),
+ Loc::MakeVal(SelfRegion));
+
+ // Scan the method for ivar references. While this requires an
+ // entire AST scan, the cost should not be high in practice.
+ St = scanForIvars(MD->getBody(getContext()), PD, St);
+ }
+ }
+ }
+ else if (VarDecl* VD = dyn_cast<VarDecl>(ND)) {
+ // Punt on static variables for now.
+ if (VD->getStorageClass() == VarDecl::Static)
+ continue;
+
+ // Only handle simple types that we can symbolicate.
+ if (!SymbolManager::canSymbolicate(VD->getType()))
+ continue;
+
+ // Initialize globals and parameters to symbolic values.
+ // Initialize local variables to undefined.
+ const MemRegion *R = StateMgr.getRegion(VD);
+ SVal X = (VD->hasGlobalStorage() || isa<ParmVarDecl>(VD) ||
+ isa<ImplicitParamDecl>(VD))
+ ? ValMgr.getRegionValueSymbolVal(R)
+ : UndefinedVal();
+
+ St = BindInternal(St, Loc::MakeVal(R), X);
+ }
+ }
+ return St;
+}
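+
+// Illustration of the initialization policy above (hypothetical code, not
+// part of this patch):
+//
+//   int g;                     // global: symbolic initial value
+//   void f(int p) {            // parameter: symbolic initial value
+//     int local;               // local: UndefinedVal until written
+//     static int cache;        // static: punted on (skipped) for now
+//   }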
+
+Store BasicStoreManager::BindDeclInternal(Store store, const VarDecl* VD,
+ SVal* InitVal) {
+
+ BasicValueFactory& BasicVals = StateMgr.getBasicVals();
+
+ // BasicStore does not model arrays and structs.
+ if (VD->getType()->isArrayType() || VD->getType()->isStructureType())
+ return store;
+
+ if (VD->hasGlobalStorage()) {
+ // Handle variables with global storage: extern, static, PrivateExtern.
+
+    // FIXME: static variables may have an initializer, but the second time a
+ // function is called those values may not be current. Currently, a function
+ // will not be called more than once.
+
+ // Static global variables should not be visited here.
+ assert(!(VD->getStorageClass() == VarDecl::Static &&
+ VD->isFileVarDecl()));
+
+ // Process static variables.
+ if (VD->getStorageClass() == VarDecl::Static) {
+ // C99: 6.7.8 Initialization
+ // If an object that has static storage duration is not initialized
+ // explicitly, then:
+ // —if it has pointer type, it is initialized to a null pointer;
+ // —if it has arithmetic type, it is initialized to (positive or
+ // unsigned) zero;
+ if (!InitVal) {
+ QualType T = VD->getType();
+ if (Loc::IsLocType(T))
+ store = BindInternal(store, getLoc(VD),
+ loc::ConcreteInt(BasicVals.getValue(0, T)));
+ else if (T->isIntegerType())
+ store = BindInternal(store, getLoc(VD),
+ nonloc::ConcreteInt(BasicVals.getValue(0, T)));
+ else {
+ // assert(0 && "ignore other types of variables");
+ }
+ } else {
+ store = BindInternal(store, getLoc(VD), *InitVal);
+ }
+ }
+ } else {
+ // Process local scalar variables.
+ QualType T = VD->getType();
+ if (Loc::IsLocType(T) || T->isIntegerType()) {
+ SVal V = InitVal ? *InitVal : UndefinedVal();
+ store = BindInternal(store, getLoc(VD), V);
+ }
+ }
+
+ return store;
+}
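+
+// Illustration of the rules above (hypothetical code, not part of this patch):
+//
+//   void f(void) {
+//     static char *name;        // no initializer: bound to a null loc (C99 6.7.8)
+//     static int   hits;        // no initializer: bound to integer 0
+//     static int   seeded = 1;  // explicit initializer: bound to *InitVal
+//     int          local;       // local scalar, no initializer: UndefinedVal
+//   }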
+
+void BasicStoreManager::print(Store store, std::ostream& O,
+ const char* nl, const char *sep) {
+
+ llvm::raw_os_ostream Out(O);
+ BindingsTy B = GetBindings(store);
+ Out << "Variables:" << nl;
+
+ bool isFirst = true;
+
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) {
+ if (isFirst) isFirst = false;
+ else Out << nl;
+
+ Out << ' ' << I.getKey() << " : ";
+ I.getData().print(Out);
+ }
+}
+
+
+void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
+ BindingsTy B = GetBindings(store);
+
+ for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
+ f.HandleBinding(*this, store, I.getKey(), I.getData());
+
+}
+
+StoreManager::BindingsHandler::~BindingsHandler() {}
diff --git a/lib/Analysis/BasicValueFactory.cpp b/lib/Analysis/BasicValueFactory.cpp
new file mode 100644
index 0000000..72ad0a5
--- /dev/null
+++ b/lib/Analysis/BasicValueFactory.cpp
@@ -0,0 +1,264 @@
+//=== BasicValueFactory.cpp - Basic values for Path Sens analysis --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicValueFactory, a class that manages the lifetime
+// of APSInt objects and symbolic constraints used by GRExprEngine
+// and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/BasicValueFactory.h"
+
+using namespace clang;
+
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+ llvm::ImmutableList<SVal> L) {
+ T.Profile(ID);
+ ID.AddPointer(L.getInternalPointer());
+}
+
+typedef std::pair<SVal, uintptr_t> SValData;
+typedef std::pair<SVal, SVal> SValPair;
+
+namespace llvm {
+template<> struct FoldingSetTrait<SValData> {
+ static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ ID.AddPointer( (void*) X.second);
+ }
+};
+
+template<> struct FoldingSetTrait<SValPair> {
+ static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ X.second.Profile(ID);
+ }
+};
+}
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
+ PersistentSValsTy;
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
+ PersistentSValPairsTy;
+
+BasicValueFactory::~BasicValueFactory() {
+  // Note that the destructor for the contents of APSIntSet will never be
+  // called, so we iterate over the set and invoke it for each APSInt.  This
+  // frees any auxiliary memory allocated to represent very large constants.
+ for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
+ I->getValue().~APSInt();
+
+ delete (PersistentSValsTy*) PersistentSVals;
+ delete (PersistentSValPairsTy*) PersistentSValPairs;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+ X.Profile(ID);
+ FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(X);
+ APSIntSet.InsertNode(P, InsertPos);
+ }
+
+ return *P;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
+ bool isUnsigned) {
+ llvm::APSInt V(X, isUnsigned);
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
+ bool isUnsigned) {
+ llvm::APSInt V(BitWidth, isUnsigned);
+ V = X;
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
+
+ unsigned bits = Ctx.getTypeSize(T);
+ llvm::APSInt V(bits, T->isUnsignedIntegerType() || Loc::IsLocType(T));
+ V = X;
+ return getValue(V);
+}
+
+const CompoundValData*
+BasicValueFactory::getCompoundValData(QualType T,
+ llvm::ImmutableList<SVal> Vals) {
+
+ llvm::FoldingSetNodeID ID;
+ CompoundValData::Profile(ID, T, Vals);
+ void* InsertPos;
+
+ CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
+ new (D) CompoundValData(T, Vals);
+ CompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const llvm::APSInt*
+BasicValueFactory::EvaluateAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt& V1, const llvm::APSInt& V2) {
+
+ switch (Op) {
+ default:
+ assert (false && "Invalid Opcode.");
+
+ case BinaryOperator::Mul:
+ return &getValue( V1 * V2 );
+
+ case BinaryOperator::Div:
+ return &getValue( V1 / V2 );
+
+ case BinaryOperator::Rem:
+ return &getValue( V1 % V2 );
+
+ case BinaryOperator::Add:
+ return &getValue( V1 + V2 );
+
+ case BinaryOperator::Sub:
+ return &getValue( V1 - V2 );
+
+ case BinaryOperator::Shl: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator<<( (unsigned) Amt ));
+ }
+
+ case BinaryOperator::Shr: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator>>( (unsigned) Amt ));
+ }
+
+ case BinaryOperator::LT:
+ return &getTruthValue( V1 < V2 );
+
+ case BinaryOperator::GT:
+ return &getTruthValue( V1 > V2 );
+
+ case BinaryOperator::LE:
+ return &getTruthValue( V1 <= V2 );
+
+ case BinaryOperator::GE:
+ return &getTruthValue( V1 >= V2 );
+
+ case BinaryOperator::EQ:
+ return &getTruthValue( V1 == V2 );
+
+ case BinaryOperator::NE:
+ return &getTruthValue( V1 != V2 );
+
+ // Note: LAnd, LOr, Comma are handled specially by higher-level logic.
+
+ case BinaryOperator::And:
+ return &getValue( V1 & V2 );
+
+ case BinaryOperator::Or:
+ return &getValue( V1 | V2 );
+
+ case BinaryOperator::Xor:
+ return &getValue( V1 ^ V2 );
+ }
+}
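+
+// Sketch of the shift guards above (hypothetical values, not part of this
+// patch; BVF is a BasicValueFactory and both operands are 32-bit unsigned):
+//
+//   const llvm::APSInt& One = BVF.getValue(1,  32, true);
+//   const llvm::APSInt& Amt = BVF.getValue(40, 32, true);
+//   BVF.EvaluateAPSInt(BinaryOperator::Shl, One, One);  // yields 2
+//   BVF.EvaluateAPSInt(BinaryOperator::Shl, One, Amt);  // NULL: 40 exceeds the bit width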
+
+
+const std::pair<SVal, uintptr_t>&
+BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
+
+ // Lazily create the folding set.
+ if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ V.Profile(ID);
+ ID.AddPointer((void*) Data);
+
+ PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
+
+ typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V, Data));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const std::pair<SVal, SVal>&
+BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
+
+ // Lazily create the folding set.
+ if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void* InsertPos;
+ V1.Profile(ID);
+ V2.Profile(ID);
+
+ PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
+
+ typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V1, V2));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
+ return &getPersistentSValWithData(X, 0).first;
+}
+
+
diff --git a/lib/Analysis/BugReporter.cpp b/lib/Analysis/BugReporter.cpp
new file mode 100644
index 0000000..32998e1
--- /dev/null
+++ b/lib/Analysis/BugReporter.cpp
@@ -0,0 +1,1697 @@
+//===-- BugReporter.cpp - Generate PathDiagnostics for Bugs ------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugReporter, a utility class for generating
+// PathDiagnostics for analyses based on GRSimpleVals.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CFG.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <queue>
+
+using namespace clang;
+
+BugReporterVisitor::~BugReporterVisitor() {}
+BugReporterContext::~BugReporterContext() {
+ for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I)
+ if ((*I)->isOwnedByReporterContext()) delete *I;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper routines for walking the ExplodedGraph and fetching statements.
+//===----------------------------------------------------------------------===//
+
+static inline Stmt* GetStmt(ProgramPoint P) {
+ if (const PostStmt* PS = dyn_cast<PostStmt>(&P))
+ return PS->getStmt();
+ else if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P))
+ return BE->getSrc()->getTerminator();
+
+ return 0;
+}
+
+static inline const ExplodedNode<GRState>*
+GetPredecessorNode(const ExplodedNode<GRState>* N) {
+ return N->pred_empty() ? NULL : *(N->pred_begin());
+}
+
+static inline const ExplodedNode<GRState>*
+GetSuccessorNode(const ExplodedNode<GRState>* N) {
+ return N->succ_empty() ? NULL : *(N->succ_begin());
+}
+
+static Stmt* GetPreviousStmt(const ExplodedNode<GRState>* N) {
+ for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
+ if (Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return 0;
+}
+
+static Stmt* GetNextStmt(const ExplodedNode<GRState>* N) {
+ for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N))
+ if (Stmt *S = GetStmt(N->getLocation())) {
+ // Check if the statement is '?' or '&&'/'||'. These are "merges",
+ // not actual statement points.
+ switch (S->getStmtClass()) {
+ case Stmt::ChooseExprClass:
+ case Stmt::ConditionalOperatorClass: continue;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator::Opcode Op = cast<BinaryOperator>(S)->getOpcode();
+ if (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr)
+ continue;
+ break;
+ }
+ default:
+ break;
+ }
+ return S;
+ }
+
+ return 0;
+}
+
+static inline Stmt* GetCurrentOrPreviousStmt(const ExplodedNode<GRState>* N) {
+ if (Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetPreviousStmt(N);
+}
+
+static inline Stmt* GetCurrentOrNextStmt(const ExplodedNode<GRState>* N) {
+ if (Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetNextStmt(N);
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticBuilder and its associated routines and helper objects.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::DenseMap<const ExplodedNode<GRState>*,
+                       const ExplodedNode<GRState>*> NodeBackMap;
+
+namespace {
+class VISIBILITY_HIDDEN NodeMapClosure : public BugReport::NodeResolver {
+ NodeBackMap& M;
+public:
+ NodeMapClosure(NodeBackMap *m) : M(*m) {}
+ ~NodeMapClosure() {}
+
+ const ExplodedNode<GRState>* getOriginalNode(const ExplodedNode<GRState>* N) {
+ NodeBackMap::iterator I = M.find(N);
+ return I == M.end() ? 0 : I->second;
+ }
+};
+
+class VISIBILITY_HIDDEN PathDiagnosticBuilder : public BugReporterContext {
+ BugReport *R;
+ PathDiagnosticClient *PDC;
+ llvm::OwningPtr<ParentMap> PM;
+ NodeMapClosure NMC;
+public:
+ PathDiagnosticBuilder(GRBugReporter &br,
+ BugReport *r, NodeBackMap *Backmap,
+ PathDiagnosticClient *pdc)
+ : BugReporterContext(br),
+ R(r), PDC(pdc), NMC(Backmap)
+ {
+ addVisitor(R);
+ }
+
+ PathDiagnosticLocation ExecutionContinues(const ExplodedNode<GRState>* N);
+
+ PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream& os,
+ const ExplodedNode<GRState>* N);
+
+ ParentMap& getParentMap() {
+ if (PM.get() == 0)
+ PM.reset(new ParentMap(getCodeDecl().getBody(getASTContext())));
+ return *PM.get();
+ }
+
+ const Stmt *getParent(const Stmt *S) {
+ return getParentMap().getParent(S);
+ }
+
+ virtual NodeMapClosure& getNodeResolver() { return NMC; }
+ BugReport& getReport() { return *R; }
+
+ PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
+
+ PathDiagnosticLocation
+ getEnclosingStmtLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt())
+ return getEnclosingStmtLocation(S);
+
+ return L;
+ }
+
+ PathDiagnosticClient::PathGenerationScheme getGenerationScheme() const {
+ return PDC ? PDC->getGenerationScheme() : PathDiagnosticClient::Extensive;
+ }
+
+ bool supportsLogicalOpControlFlow() const {
+ return PDC ? PDC->supportsLogicalOpControlFlow() : true;
+ }
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode<GRState>* N) {
+ if (Stmt *S = GetNextStmt(N))
+ return PathDiagnosticLocation(S, getSourceManager());
+
+ return FullSourceLoc(getCodeDecl().getBodyRBrace(getASTContext()),
+ getSourceManager());
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream& os,
+ const ExplodedNode<GRState>* N) {
+
+ // Slow, but probably doesn't matter.
+ if (os.str().empty())
+ os << ' ';
+
+ const PathDiagnosticLocation &Loc = ExecutionContinues(N);
+
+ if (Loc.asStmt())
+ os << "Execution continues on line "
+ << getSourceManager().getInstantiationLineNumber(Loc.asLocation())
+ << '.';
+ else
+ os << "Execution jumps to the end of the "
+ << (isa<ObjCMethodDecl>(getCodeDecl()) ? "method" : "function") << '.';
+
+ return Loc;
+}
+
+static bool IsNested(const Stmt *S, ParentMap &PM) {
+ if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
+ return true;
+
+ const Stmt *Parent = PM.getParentIgnoreParens(S);
+
+ if (Parent)
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::WhileStmtClass:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
+ assert(S && "Null Stmt* passed to getEnclosingStmtLocation");
+ ParentMap &P = getParentMap();
+ SourceManager &SMgr = getSourceManager();
+
+ while (IsNested(S, P)) {
+ const Stmt *Parent = P.getParentIgnoreParens(S);
+
+ if (!Parent)
+ break;
+
+ switch (Parent->getStmtClass()) {
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<BinaryOperator>(Parent);
+ if (B->isLogicalOp())
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ }
+ case Stmt::CompoundStmtClass:
+ case Stmt::StmtExprClass:
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ChooseExprClass:
+      // Similar to '?': if we are referring to the condition, just have the
+      // edge point to the entire choose expression.
+ if (cast<ChooseExpr>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr);
+ else
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ConditionalOperatorClass:
+ // For '?', if we are referring to condition, just have the edge point
+ // to the entire '?' expression.
+ if (cast<ConditionalOperator>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr);
+ else
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::DoStmtClass:
+ return PathDiagnosticLocation(S, SMgr);
+ case Stmt::ForStmtClass:
+ if (cast<ForStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::IfStmtClass:
+ if (cast<IfStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ case Stmt::WhileStmtClass:
+ if (cast<WhileStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr);
+ break;
+ default:
+ break;
+ }
+
+ S = Parent;
+ }
+
+ assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
+
+ // Special case: DeclStmts can appear in for statement declarations, in which
+ // case the ForStmt is the context.
+ if (isa<DeclStmt>(S)) {
+ if (const Stmt *Parent = P.getParent(S)) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ return PathDiagnosticLocation(Parent, SMgr);
+ default:
+ break;
+ }
+ }
+ }
+ else if (isa<BinaryOperator>(S)) {
+ // Special case: the binary operator represents the initialization
+ // code in a for statement (this can happen when the variable being
+    // initialized is an old variable).
+ if (const ForStmt *FS =
+ dyn_cast_or_null<ForStmt>(P.getParentIgnoreParens(S))) {
+ if (FS->getInit() == S)
+ return PathDiagnosticLocation(FS, SMgr);
+ }
+ }
+
+ return PathDiagnosticLocation(S, SMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// ScanNotableSymbols: closure-like callback for scanning Store bindings.
+//===----------------------------------------------------------------------===//
+
+static const VarDecl*
+GetMostRecentVarDeclBinding(const ExplodedNode<GRState>* N,
+ GRStateManager& VMgr, SVal X) {
+
+ for ( ; N ; N = N->pred_empty() ? 0 : *N->pred_begin()) {
+
+ ProgramPoint P = N->getLocation();
+
+ if (!isa<PostStmt>(P))
+ continue;
+
+ DeclRefExpr* DR = dyn_cast<DeclRefExpr>(cast<PostStmt>(P).getStmt());
+
+ if (!DR)
+ continue;
+
+ SVal Y = VMgr.GetSVal(N->getState(), DR);
+
+ if (X != Y)
+ continue;
+
+ VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl());
+
+ if (!VD)
+ continue;
+
+ return VD;
+ }
+
+ return 0;
+}
+
+namespace {
+class VISIBILITY_HIDDEN NotableSymbolHandler
+: public StoreManager::BindingsHandler {
+
+ SymbolRef Sym;
+ const GRState* PrevSt;
+ const Stmt* S;
+ GRStateManager& VMgr;
+ const ExplodedNode<GRState>* Pred;
+ PathDiagnostic& PD;
+ BugReporter& BR;
+
+public:
+
+ NotableSymbolHandler(SymbolRef sym, const GRState* prevst, const Stmt* s,
+ GRStateManager& vmgr, const ExplodedNode<GRState>* pred,
+ PathDiagnostic& pd, BugReporter& br)
+ : Sym(sym), PrevSt(prevst), S(s), VMgr(vmgr), Pred(pred), PD(pd), BR(br) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal V) {
+
+ SymbolRef ScanSym = V.getAsSymbol();
+
+ if (ScanSym != Sym)
+ return true;
+
+ // Check if the previous state has this binding.
+ SVal X = VMgr.GetSVal(PrevSt, loc::MemRegionVal(R));
+
+ if (X == V) // Same binding?
+ return true;
+
+ // Different binding. Only handle assignments for now. We don't pull
+ // this check out of the loop because we will eventually handle other
+ // cases.
+
+ VarDecl *VD = 0;
+
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp())
+ return true;
+
+ // What variable did we assign to?
+ DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenCasts());
+
+ if (!DR)
+ return true;
+
+ VD = dyn_cast<VarDecl>(DR->getDecl());
+ }
+ else if (const DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: Eventually CFGs won't have DeclStmts. Right now we
+ // assume that each DeclStmt has a single Decl. This invariant
+      // holds by construction in the CFG.
+ VD = dyn_cast<VarDecl>(*DS->decl_begin());
+ }
+
+ if (!VD)
+ return true;
+
+ // What is the most recently referenced variable with this binding?
+ const VarDecl* MostRecent = GetMostRecentVarDeclBinding(Pred, VMgr, V);
+
+ if (!MostRecent)
+ return true;
+
+ // Create the diagnostic.
+ FullSourceLoc L(S->getLocStart(), BR.getSourceManager());
+
+ if (Loc::IsLocType(VD->getType())) {
+ std::string msg = "'" + std::string(VD->getNameAsString()) +
+ "' now aliases '" + MostRecent->getNameAsString() + "'";
+
+ PD.push_front(new PathDiagnosticEventPiece(L, msg));
+ }
+
+ return true;
+ }
+};
+}
+
+static void HandleNotableSymbol(const ExplodedNode<GRState>* N,
+ const Stmt* S,
+ SymbolRef Sym, BugReporter& BR,
+ PathDiagnostic& PD) {
+
+ const ExplodedNode<GRState>* Pred = N->pred_empty() ? 0 : *N->pred_begin();
+ const GRState* PrevSt = Pred ? Pred->getState() : 0;
+
+ if (!PrevSt)
+ return;
+
+ // Look at the region bindings of the current state that map to the
+ // specified symbol. Are any of them not in the previous state?
+ GRStateManager& VMgr = cast<GRBugReporter>(BR).getStateManager();
+ NotableSymbolHandler H(Sym, PrevSt, S, VMgr, Pred, PD, BR);
+ cast<GRBugReporter>(BR).getStateManager().iterBindings(N->getState(), H);
+}
+
+namespace {
+class VISIBILITY_HIDDEN ScanNotableSymbols
+: public StoreManager::BindingsHandler {
+
+ llvm::SmallSet<SymbolRef, 10> AlreadyProcessed;
+ const ExplodedNode<GRState>* N;
+ Stmt* S;
+ GRBugReporter& BR;
+ PathDiagnostic& PD;
+
+public:
+ ScanNotableSymbols(const ExplodedNode<GRState>* n, Stmt* s, GRBugReporter& br,
+ PathDiagnostic& pd)
+ : N(n), S(s), BR(br), PD(pd) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store,
+ const MemRegion* R, SVal V) {
+
+ SymbolRef ScanSym = V.getAsSymbol();
+
+ if (!ScanSym)
+ return true;
+
+ if (!BR.isNotable(ScanSym))
+ return true;
+
+ if (AlreadyProcessed.count(ScanSym))
+ return true;
+
+ AlreadyProcessed.insert(ScanSym);
+
+ HandleNotableSymbol(N, S, ScanSym, BR, PD);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// "Minimal" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+
+static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM);
+
+static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode<GRState> *N) {
+
+ SourceManager& SMgr = PDB.getSourceManager();
+ const ExplodedNode<GRState>* NextNode = N->pred_empty()
+ ? NULL : *(N->pred_begin());
+ while (NextNode) {
+ N = NextNode;
+ NextNode = GetPredecessorNode(N);
+
+ ProgramPoint P = N->getLocation();
+
+ if (const BlockEdge* BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock* Src = BE->getSrc();
+ CFGBlock* Dst = BE->getDst();
+ Stmt* T = Src->getTerminator();
+
+ if (!T)
+ continue;
+
+ FullSourceLoc Start(T->getLocStart(), SMgr);
+
+ switch (T->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass: {
+ Stmt* S = GetNextStmt(N);
+
+ if (!S)
+ continue;
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+
+ os << "Control jumps to line "
+ << End.asLocation().getInstantiationLineNumber();
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ // Figure out what case arm we took.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (Stmt* S = Dst->getLabel()) {
+ PathDiagnosticLocation End(S, SMgr);
+
+ switch (S->getStmtClass()) {
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ CaseStmt* Case = cast<CaseStmt>(S);
+ Expr* LHS = Case->getLHS()->IgnoreParenCasts();
+
+ // Determine if it is an enum.
+ bool GetRawInt = true;
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+              // where it is not an EnumConstantDecl?
+ EnumConstantDecl* D =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+ if (D) {
+ GetRawInt = false;
+ os << D->getNameAsString();
+ }
+ }
+
+ if (GetRawInt)
+ os << LHS->EvaluateAsInt(PDB.getASTContext());
+
+ os << ":' at line "
+ << End.asLocation().getInstantiationLineNumber();
+ break;
+ }
+ }
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "'Default' branch taken. ";
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+
+ break;
+ }
+
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for ternary '?'.
+ case Stmt::ConditionalOperatorClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "'?' condition is ";
+
+ if (*(Src->succ_begin()+1) == Dst)
+ os << "false";
+ else
+ os << "true";
+
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for short-circuited '&&' and '||'.
+ case Stmt::BinaryOperatorClass: {
+ if (!PDB.supportsLogicalOpControlFlow())
+ break;
+
+ BinaryOperator *B = cast<BinaryOperator>(T);
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Left side of '";
+
+ if (B->getOpcode() == BinaryOperator::LAnd) {
+ os << "&&" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation End(B->getLHS(), SMgr);
+ PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+ else {
+ assert(B->getOpcode() == BinaryOperator::LOr);
+ os << "||" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation End(B->getLHS(), SMgr);
+ PathDiagnosticLocation Start(B->getOperatorLoc(), SMgr);
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+
+ break;
+ }
+
+ case Stmt::DoStmtClass: {
+ if (*(Src->succ_begin()) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is true. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is false. Exiting loop"));
+ }
+
+ break;
+ }
+
+ case Stmt::WhileStmtClass:
+ case Stmt::ForStmtClass: {
+ if (*(Src->succ_begin()+1) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is false. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is true. Entering loop body"));
+ }
+
+ break;
+ }
+
+ case Stmt::IfStmtClass: {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ if (*(Src->succ_begin()+1) == Dst)
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking false branch"));
+ else
+ PD.push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking true branch"));
+
+ break;
+ }
+ }
+ }
+
+ if (NextNode) {
+ for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
+ E = PDB.visitor_end(); I!=E; ++I) {
+ if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB))
+ PD.push_front(p);
+ }
+ }
+
+ if (const PostStmt* PS = dyn_cast<PostStmt>(&P)) {
+ // Scan the region bindings, and see if a "notable" symbol has a new
+ // lval binding.
+ ScanNotableSymbols SNS(N, PS->getStmt(), PDB.getBugReporter(), PD);
+ PDB.getStateManager().iterBindings(N->getState(), SNS);
+ }
+ }
+
+ // After constructing the full PathDiagnostic, do a pass over it to compact
+ // PathDiagnosticPieces that occur within a macro.
+ CompactPathDiagnostic(PD, PDB.getSourceManager());
+}
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
+static bool IsControlFlowExpr(const Stmt *S) {
+ const Expr *E = dyn_cast<Expr>(S);
+
+ if (!E)
+ return false;
+
+ E = E->IgnoreParenCasts();
+
+ if (isa<ConditionalOperator>(E))
+ return true;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+ if (B->isLogicalOp())
+ return true;
+
+ return false;
+}
+
+namespace {
+class VISIBILITY_HIDDEN ContextLocation : public PathDiagnosticLocation {
+ bool IsDead;
+public:
+ ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+ : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+ void markDead() { IsDead = true; }
+ bool isDead() const { return IsDead; }
+};
+
+class VISIBILITY_HIDDEN EdgeBuilder {
+ std::vector<ContextLocation> CLocs;
+ typedef std::vector<ContextLocation>::iterator iterator;
+ PathDiagnostic &PD;
+ PathDiagnosticBuilder &PDB;
+ PathDiagnosticLocation PrevLoc;
+
+ bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+ bool containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee);
+
+ PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+ PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+ bool firstCharOnly = false) {
+ if (const Stmt *S = L.asStmt()) {
+ const Stmt *Original = S;
+ while (1) {
+ // Adjust the location for some expressions that are best referenced
+ // by one of their subexpressions.
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ParenExprClass:
+ S = cast<ParenExpr>(S)->IgnoreParens();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ConditionalOperatorClass:
+ S = cast<ConditionalOperator>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ChooseExprClass:
+ S = cast<ChooseExpr>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryOperatorClass:
+ S = cast<BinaryOperator>(S)->getLHS();
+ firstCharOnly = true;
+ continue;
+ }
+
+ break;
+ }
+
+ if (S != Original)
+ L = PathDiagnosticLocation(S, L.getManager());
+ }
+
+ if (firstCharOnly)
+ L = PathDiagnosticLocation(L.asLocation());
+
+ return L;
+ }
+
+ void popLocation() {
+ if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+      // For contexts, we only want the first character as the range.
+ rawAddEdge(cleanUpLocation(CLocs.back(), true));
+ }
+ CLocs.pop_back();
+ }
+
+ PathDiagnosticLocation IgnoreParens(const PathDiagnosticLocation &L);
+
+public:
+ EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+ : PD(pd), PDB(pdb) {
+
+ // If the PathDiagnostic already has pieces, add the enclosing statement
+ // of the first piece as a context as well.
+ if (!PD.empty()) {
+ PrevLoc = PD.begin()->getLocation();
+
+ if (const Stmt *S = PrevLoc.asStmt())
+ addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+
+ ~EdgeBuilder() {
+ while (!CLocs.empty()) popLocation();
+
+ // Finally, add an initial edge from the start location of the first
+ // statement (if it doesn't already exist).
+    // FIXME: Should handle CXXTryStmt if the analyzer starts supporting C++.
+ if (const CompoundStmt *CS =
+ PDB.getCodeDecl().getCompoundBody(PDB.getASTContext()))
+ if (!CS->body_empty()) {
+ SourceLocation Loc = (*CS->body_begin())->getLocStart();
+ rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
+ }
+
+ }
+
+ void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
+
+ void addEdge(const Stmt *S, bool alwaysAdd = false) {
+ addEdge(PathDiagnosticLocation(S, PDB.getSourceManager()), alwaysAdd);
+ }
+
+ void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+ void addContext(const Stmt *S);
+ void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt()) {
+ if (IsControlFlowExpr(S))
+ return L;
+
+ return PDB.getEnclosingStmtLocation(S);
+ }
+
+ return L;
+}
+
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee) {
+
+ if (Container == Containee)
+ return true;
+
+ if (Container.asDecl())
+ return true;
+
+ if (const Stmt *S = Containee.asStmt())
+ if (const Stmt *ContainerS = Container.asStmt()) {
+ while (S) {
+ if (S == ContainerS)
+ return true;
+ S = PDB.getParent(S);
+ }
+ return false;
+ }
+
+ // Less accurate: compare using source ranges.
+ SourceRange ContainerR = Container.asRange();
+ SourceRange ContaineeR = Containee.asRange();
+
+ SourceManager &SM = PDB.getSourceManager();
+ SourceLocation ContainerRBeg = SM.getInstantiationLoc(ContainerR.getBegin());
+ SourceLocation ContainerREnd = SM.getInstantiationLoc(ContainerR.getEnd());
+ SourceLocation ContaineeRBeg = SM.getInstantiationLoc(ContaineeR.getBegin());
+ SourceLocation ContaineeREnd = SM.getInstantiationLoc(ContaineeR.getEnd());
+
+ unsigned ContainerBegLine = SM.getInstantiationLineNumber(ContainerRBeg);
+ unsigned ContainerEndLine = SM.getInstantiationLineNumber(ContainerREnd);
+ unsigned ContaineeBegLine = SM.getInstantiationLineNumber(ContaineeRBeg);
+ unsigned ContaineeEndLine = SM.getInstantiationLineNumber(ContaineeREnd);
+
+ assert(ContainerBegLine <= ContainerEndLine);
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
+  return (ContainerBegLine <= ContaineeBegLine &&
+          ContainerEndLine >= ContaineeEndLine &&
+          (ContainerBegLine != ContaineeBegLine ||
+           SM.getInstantiationColumnNumber(ContainerRBeg) <=
+           SM.getInstantiationColumnNumber(ContaineeRBeg)) &&
+          (ContainerEndLine != ContaineeEndLine ||
+           SM.getInstantiationColumnNumber(ContainerREnd) >=
+           SM.getInstantiationColumnNumber(ContaineeREnd)));
+}
+
+PathDiagnosticLocation
+EdgeBuilder::IgnoreParens(const PathDiagnosticLocation &L) {
+ if (const Expr* E = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PathDiagnosticLocation(E->IgnoreParenCasts(),
+ PDB.getSourceManager());
+ return L;
+}
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+ if (!PrevLoc.isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
+ const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+
+ if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+ return;
+
+ // FIXME: Ignore intra-macro edges for now.
+ if (NewLocClean.asLocation().getInstantiationLoc() ==
+ PrevLocClean.asLocation().getInstantiationLoc())
+ return;
+
+ PD.push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+ PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
+
+ if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+ return;
+
+ const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+ while (!CLocs.empty()) {
+ ContextLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == CLoc) {
+ if (alwaysAdd) {
+ if (IsConsumedExpr(TopContextLoc) &&
+ !IsControlFlowExpr(TopContextLoc.asStmt()))
+ TopContextLoc.markDead();
+
+ rawAddEdge(NewLoc);
+ }
+
+ return;
+ }
+
+ if (containsLocation(TopContextLoc, CLoc)) {
+ if (alwaysAdd) {
+ rawAddEdge(NewLoc);
+
+ if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
+ CLocs.push_back(ContextLocation(CLoc, true));
+ return;
+ }
+ }
+
+ CLocs.push_back(CLoc);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ // If we reach here, there is no enclosing context. Just add the edge.
+ rawAddEdge(NewLoc);
+}
+
+bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
+ if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+
+ return false;
+}
+
+void EdgeBuilder::addExtendedContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ const Stmt *Parent = PDB.getParent(S);
+ while (Parent) {
+ if (isa<CompoundStmt>(Parent))
+ Parent = PDB.getParent(Parent);
+ else
+ break;
+ }
+
+ if (Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::DoStmtClass:
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ addContext(Parent);
+ default:
+ break;
+ }
+ }
+
+ addContext(S);
+}
+
+void EdgeBuilder::addContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ PathDiagnosticLocation L(S, PDB.getSourceManager());
+
+ while (!CLocs.empty()) {
+ const PathDiagnosticLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == L)
+ return;
+
+ if (containsLocation(TopContextLoc, L)) {
+ CLocs.push_back(L);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ CLocs.push_back(L);
+}
+
+static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode<GRState> *N) {
+
+ EdgeBuilder EB(PD, PDB);
+
+ const ExplodedNode<GRState>* NextNode = N->pred_empty()
+ ? NULL : *(N->pred_begin());
+ while (NextNode) {
+ N = NextNode;
+ NextNode = GetPredecessorNode(N);
+ ProgramPoint P = N->getLocation();
+
+ do {
+ // Block edges.
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock &Blk = *BE->getSrc();
+ const Stmt *Term = Blk.getTerminator();
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, PDB.getSourceManager());
+ const CompoundStmt *CS = NULL;
+
+ if (!Term) {
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
+ }
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L,
+ "Looping back to the head of the loop");
+
+ EB.addEdge(p->getLocation(), true);
+ PD.push_front(p);
+
+ if (CS) {
+ PathDiagnosticLocation BL(CS->getRBracLoc(),
+ PDB.getSourceManager());
+ BL = PathDiagnosticLocation(BL.asLocation());
+ EB.addEdge(BL);
+ }
+ }
+
+ if (Term)
+ EB.addContext(Term);
+
+ break;
+ }
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ if (const Stmt* S = BE->getFirstStmt()) {
+ if (IsControlFlowExpr(S)) {
+ // Add the proper context for '&&', '||', and '?'.
+ EB.addContext(S);
+ }
+ else
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+
+ break;
+ }
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ for (BugReporterContext::visitor_iterator I = PDB.visitor_begin(),
+ E = PDB.visitor_end(); I!=E; ++I) {
+ if (PathDiagnosticPiece* p = (*I)->VisitNode(N, NextNode, PDB)) {
+ const PathDiagnosticLocation &Loc = p->getLocation();
+ EB.addEdge(Loc, true);
+ PD.push_front(p);
+ if (const Stmt *S = Loc.asStmt())
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugType and subclasses.
+//===----------------------------------------------------------------------===//
+BugType::~BugType() {}
+void BugType::FlushReports(BugReporter &BR) {}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReport and subclasses.
+//===----------------------------------------------------------------------===//
+BugReport::~BugReport() {}
+RangedBugReport::~RangedBugReport() {}
+
+Stmt* BugReport::getStmt(BugReporter& BR) const {
+ ProgramPoint ProgP = EndNode->getLocation();
+ Stmt *S = NULL;
+
+ if (BlockEntrance* BE = dyn_cast<BlockEntrance>(&ProgP)) {
+ if (BE->getBlock() == &BR.getCFG()->getExit()) S = GetPreviousStmt(EndNode);
+ }
+ if (!S) S = GetStmt(ProgP);
+
+ return S;
+}
+
+PathDiagnosticPiece*
+BugReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* EndPathNode) {
+
+ Stmt* S = getStmt(BRC.getBugReporter());
+
+ if (!S)
+ return NULL;
+
+ const SourceRange *Beg, *End;
+ getRanges(BRC.getBugReporter(), Beg, End);
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+
+ // Only add the statement itself as a range if we didn't specify any
+ // special ranges for this report.
+ PathDiagnosticPiece* P = new PathDiagnosticEventPiece(L, getDescription(),
+ Beg == End);
+
+ for (; Beg != End; ++Beg)
+ P->addRange(*Beg);
+
+ return P;
+}
+
+void BugReport::getRanges(BugReporter& BR, const SourceRange*& beg,
+ const SourceRange*& end) {
+
+ if (Expr* E = dyn_cast_or_null<Expr>(getStmt(BR))) {
+ R = E->getSourceRange();
+ assert(R.isValid());
+ beg = &R;
+ end = beg+1;
+ }
+ else
+ beg = end = 0;
+}
+
+SourceLocation BugReport::getLocation() const {
+ if (EndNode)
+ if (Stmt* S = GetCurrentOrPreviousStmt(EndNode)) {
+ // For member expressions, return the location of the '.' or '->'.
+ if (MemberExpr* ME = dyn_cast<MemberExpr>(S))
+ return ME->getMemberLoc();
+
+ return S->getLocStart();
+ }
+
+ return FullSourceLoc();
+}
+
+PathDiagnosticPiece* BugReport::VisitNode(const ExplodedNode<GRState>* N,
+ const ExplodedNode<GRState>* PrevN,
+ BugReporterContext &BRC) {
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReporter and subclasses.
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() {
+ for (iterator I=begin(), E=end(); I!=E; ++I) delete *I;
+}
+
+GRBugReporter::~GRBugReporter() { FlushReports(); }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph<GRState>&
+GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+GRStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() { FlushReports(); }
+
+void BugReporter::FlushReports() {
+ if (BugTypes.isEmpty())
+ return;
+
+ // First flush the warnings for each BugType. This may end up creating new
+ // warnings and new BugTypes. Because ImmutableSet is a functional data
+ // structure, we do not need to worry about the iterators being invalidated.
+ for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(*this);
+
+ // Iterate through BugTypes a second time. BugTypes may have been updated
+ // with new BugType objects and new warnings.
+ for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I) {
+ BugType *BT = const_cast<BugType*>(*I);
+
+ typedef llvm::FoldingSet<BugReportEquivClass> SetTy;
+ SetTy& EQClasses = BT->EQClasses;
+
+ for (SetTy::iterator EI=EQClasses.begin(), EE=EQClasses.end(); EI!=EE;++EI){
+ BugReportEquivClass& EQ = *EI;
+ FlushReport(EQ);
+ }
+
+ // Delete the BugType object. This will also delete the equivalence
+ // classes.
+ delete BT;
+ }
+
+ // Remove all references to the BugType objects.
+ BugTypes = F.GetEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+static std::pair<std::pair<ExplodedGraph<GRState>*, NodeBackMap*>,
+ std::pair<ExplodedNode<GRState>*, unsigned> >
+MakeReportGraph(const ExplodedGraph<GRState>* G,
+ const ExplodedNode<GRState>** NStart,
+ const ExplodedNode<GRState>** NEnd) {
+
+ // Create the trimmed graph. It will contain the shortest paths from the
+ // error nodes to the root. In the new graph we should only have one
+ // error node unless there are two or more error nodes with the same minimum
+ // path length.
+ ExplodedGraph<GRState>* GTrim;
+ InterExplodedGraphMap<GRState>* NMap;
+
+ llvm::DenseMap<const void*, const void*> InverseMap;
+ llvm::tie(GTrim, NMap) = G->Trim(NStart, NEnd, &InverseMap);
+
+ // Create owning pointers for GTrim and NMap just to ensure that they are
+  // released when this function exits.
+ llvm::OwningPtr<ExplodedGraph<GRState> > AutoReleaseGTrim(GTrim);
+ llvm::OwningPtr<InterExplodedGraphMap<GRState> > AutoReleaseNMap(NMap);
+
+ // Find the (first) error node in the trimmed graph. We just need to consult
+ // the node map (NMap) which maps from nodes in the original graph to nodes
+ // in the new graph.
+
+ std::queue<const ExplodedNode<GRState>*> WS;
+ typedef llvm::DenseMap<const ExplodedNode<GRState>*,unsigned> IndexMapTy;
+ IndexMapTy IndexMap;
+
+  for (const ExplodedNode<GRState>** I = NStart; I != NEnd; ++I)
+    if (const ExplodedNode<GRState> *N = NMap->getMappedNode(*I)) {
+      unsigned NodeIndex = I - NStart;
+ WS.push(N);
+ IndexMap[*I] = NodeIndex;
+ }
+
+ assert(!WS.empty() && "No error node found in the trimmed graph.");
+
+ // Create a new (third!) graph with a single path. This is the graph
+ // that will be returned to the caller.
+ ExplodedGraph<GRState> *GNew =
+ new ExplodedGraph<GRState>(GTrim->getCFG(), GTrim->getCodeDecl(),
+ GTrim->getContext());
+
+ // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
+ // to the root node, and then construct a new graph that contains only
+ // a single path.
+ llvm::DenseMap<const void*,unsigned> Visited;
+
+ unsigned cnt = 0;
+ const ExplodedNode<GRState>* Root = 0;
+
+ while (!WS.empty()) {
+ const ExplodedNode<GRState>* Node = WS.front();
+ WS.pop();
+
+ if (Visited.find(Node) != Visited.end())
+ continue;
+
+ Visited[Node] = cnt++;
+
+ if (Node->pred_empty()) {
+ Root = Node;
+ break;
+ }
+
+ for (ExplodedNode<GRState>::const_pred_iterator I=Node->pred_begin(),
+ E=Node->pred_end(); I!=E; ++I)
+ WS.push(*I);
+ }
+
+ assert(Root);
+
+ // Now walk from the root down the BFS path, always taking the successor
+ // with the lowest number.
+ ExplodedNode<GRState> *Last = 0, *First = 0;
+ NodeBackMap *BM = new NodeBackMap();
+ unsigned NodeIndex = 0;
+
+ for ( const ExplodedNode<GRState> *N = Root ;;) {
+ // Lookup the number associated with the current node.
+ llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
+ assert(I != Visited.end());
+
+ // Create the equivalent node in the new graph with the same state
+ // and location.
+ ExplodedNode<GRState>* NewN =
+ GNew->getNode(N->getLocation(), N->getState());
+
+ // Store the mapping to the original node.
+ llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
+ assert(IMitr != InverseMap.end() && "No mapping to original node.");
+ (*BM)[NewN] = (const ExplodedNode<GRState>*) IMitr->second;
+
+ // Link up the new node with the previous node.
+ if (Last)
+ NewN->addPredecessor(Last);
+
+ Last = NewN;
+
+ // Are we at the final node?
+ IndexMapTy::iterator IMI =
+ IndexMap.find((const ExplodedNode<GRState>*)(IMitr->second));
+ if (IMI != IndexMap.end()) {
+ First = NewN;
+ NodeIndex = IMI->second;
+ break;
+ }
+
+    // Find the next successor node. We choose the node that is marked
+    // with the lowest BFS number.
+ ExplodedNode<GRState>::const_succ_iterator SI = N->succ_begin();
+ ExplodedNode<GRState>::const_succ_iterator SE = N->succ_end();
+ N = 0;
+
+ for (unsigned MinVal = 0; SI != SE; ++SI) {
+
+ I = Visited.find(*SI);
+
+ if (I == Visited.end())
+ continue;
+
+ if (!N || I->second < MinVal) {
+ N = *SI;
+ MinVal = I->second;
+ }
+ }
+
+ assert(N);
+ }
+
+ assert(First);
+
+ return std::make_pair(std::make_pair(GNew, BM),
+ std::make_pair(First, NodeIndex));
+}
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+/// and collapses PathDiagnosticPieces that are expanded by macros.
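+///
+/// For example (a hypothetical macro, used only for illustration):
+///
+///   #define CHECK_NONNULL(p) if (!(p)) return 0
+///   ...
+///   CHECK_NONNULL(x);
+///
+/// Pieces whose locations fall inside the expansion of CHECK_NONNULL are
+/// nested under a single PathDiagnosticMacroPiece anchored at the expansion
+/// site, and macro groups that contain no event pieces are discarded.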
+static void CompactPathDiagnostic(PathDiagnostic &PD, const SourceManager& SM) {
+ typedef std::vector<std::pair<PathDiagnosticMacroPiece*, SourceLocation> >
+ MacroStackTy;
+
+ typedef std::vector<PathDiagnosticPiece*>
+ PiecesTy;
+
+ MacroStackTy MacroStack;
+ PiecesTy Pieces;
+
+ for (PathDiagnostic::iterator I = PD.begin(), E = PD.end(); I!=E; ++I) {
+ // Get the location of the PathDiagnosticPiece.
+ const FullSourceLoc Loc = I->getLocation().asLocation();
+
+    // Determine the instantiation location, which is the location at which
+    // we group related PathDiagnosticPieces.
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SM.getInstantiationLoc(Loc) :
+ SourceLocation();
+
+ if (Loc.isFileID()) {
+ MacroStack.clear();
+ Pieces.push_back(&*I);
+ continue;
+ }
+
+ assert(Loc.isMacroID());
+
+ // Is the PathDiagnosticPiece within the same macro group?
+ if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+ MacroStack.back().first->push_back(&*I);
+ continue;
+ }
+
+    // We aren't in the same group. Are we descending into a new macro
+    // or are we part of an old one?
+ PathDiagnosticMacroPiece *MacroGroup = 0;
+
+ SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+ SM.getInstantiationLoc(Loc) :
+ SourceLocation();
+
+ // Walk the entire macro stack.
+ while (!MacroStack.empty()) {
+ if (InstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ if (ParentInstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ MacroStack.pop_back();
+ }
+
+ if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+ // Create a new macro group and add it to the stack.
+ PathDiagnosticMacroPiece *NewGroup = new PathDiagnosticMacroPiece(Loc);
+
+ if (MacroGroup)
+ MacroGroup->push_back(NewGroup);
+ else {
+ assert(InstantiationLoc.isFileID());
+ Pieces.push_back(NewGroup);
+ }
+
+ MacroGroup = NewGroup;
+ MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+ }
+
+ // Finally, add the PathDiagnosticPiece to the group.
+ MacroGroup->push_back(&*I);
+ }
+
+ // Now take the pieces and construct a new PathDiagnostic.
+ PD.resetPath(false);
+
+ for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) {
+ if (PathDiagnosticMacroPiece *MP=dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (!MP->containsEvent()) {
+ delete MP;
+ continue;
+ }
+
+ PD.push_back(*I);
+ }
+}
+
+void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
+ BugReportEquivClass& EQ) {
+
+ std::vector<const ExplodedNode<GRState>*> Nodes;
+
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+ const ExplodedNode<GRState>* N = I->getEndNode();
+ if (N) Nodes.push_back(N);
+ }
+
+ if (Nodes.empty())
+ return;
+
+ // Construct a new graph that contains only a single path from the error
+ // node to a root.
+ const std::pair<std::pair<ExplodedGraph<GRState>*, NodeBackMap*>,
+ std::pair<ExplodedNode<GRState>*, unsigned> >&
+ GPair = MakeReportGraph(&getGraph(), &Nodes[0], &Nodes[0] + Nodes.size());
+
+ // Find the BugReport with the original location.
+ BugReport *R = 0;
+ unsigned i = 0;
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I, ++i)
+ if (i == GPair.second.second) { R = *I; break; }
+
+ assert(R && "No original report found for sliced graph.");
+
+ llvm::OwningPtr<ExplodedGraph<GRState> > ReportGraph(GPair.first.first);
+ llvm::OwningPtr<NodeBackMap> BackMap(GPair.first.second);
+ const ExplodedNode<GRState> *N = GPair.second.first;
+
+ // Start building the path diagnostic...
+ PathDiagnosticBuilder PDB(*this, R, BackMap.get(), getPathDiagnosticClient());
+
+ if (PathDiagnosticPiece* Piece = R->getEndPath(PDB, N))
+ PD.push_back(Piece);
+ else
+ return;
+
+ R->registerInitialVisitors(PDB, N);
+
+ switch (PDB.getGenerationScheme()) {
+ case PathDiagnosticClient::Extensive:
+ GenerateExtensivePathDiagnostic(PD, PDB, N);
+ break;
+ case PathDiagnosticClient::Minimal:
+ GenerateMinimalPathDiagnostic(PD, PDB, N);
+ break;
+ }
+}
+
+void BugReporter::Register(BugType *BT) {
+ BugTypes = F.Add(BugTypes, BT);
+}
+
+void BugReporter::EmitReport(BugReport* R) {
+ // Compute the bug report's hash to determine its equivalence class.
+ llvm::FoldingSetNodeID ID;
+ R->Profile(ID);
+
+  // Lookup the equivalence class. If there isn't one, create it.
+ BugType& BT = R->getBugType();
+ Register(&BT);
+ void *InsertPos;
+ BugReportEquivClass* EQ = BT.EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!EQ) {
+ EQ = new BugReportEquivClass(R);
+ BT.EQClasses.InsertNode(EQ, InsertPos);
+ }
+ else
+ EQ->AddReport(R);
+}
+
+void BugReporter::FlushReport(BugReportEquivClass& EQ) {
+ assert(!EQ.Reports.empty());
+ BugReport &R = **EQ.begin();
+ PathDiagnosticClient* PD = getPathDiagnosticClient();
+
+ // FIXME: Make sure we use the 'R' for the path that was actually used.
+ // Probably doesn't make a difference in practice.
+ BugType& BT = R.getBugType();
+
+ llvm::OwningPtr<PathDiagnostic>
+ D(new PathDiagnostic(R.getBugType().getName(),
+ !PD || PD->useVerboseDescription()
+ ? R.getDescription() : R.getShortDescription(),
+ BT.getCategory()));
+
+ GeneratePathDiagnostic(*D.get(), EQ);
+
+ // Get the meta data.
+ std::pair<const char**, const char**> Meta = R.getExtraDescriptiveText();
+ for (const char** s = Meta.first; s != Meta.second; ++s) D->addMeta(*s);
+
+ // Emit a summary diagnostic to the regular Diagnostics engine.
+ const SourceRange *Beg = 0, *End = 0;
+ R.getRanges(*this, Beg, End);
+ Diagnostic& Diag = getDiagnostic();
+ FullSourceLoc L(R.getLocation(), getSourceManager());
+ unsigned ErrorDiag = Diag.getCustomDiagID(Diagnostic::Warning,
+ R.getShortDescription().c_str());
+
+ switch (End-Beg) {
+ default: assert(0 && "Don't handle this many ranges yet!");
+ case 0: Diag.Report(L, ErrorDiag); break;
+ case 1: Diag.Report(L, ErrorDiag) << Beg[0]; break;
+ case 2: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1]; break;
+ case 3: Diag.Report(L, ErrorDiag) << Beg[0] << Beg[1] << Beg[2]; break;
+ }
+
+ // Emit a full diagnostic for the path if we have a PathDiagnosticClient.
+ if (!PD)
+ return;
+
+ if (D->empty()) {
+ PathDiagnosticPiece* piece =
+ new PathDiagnosticEventPiece(L, R.getDescription());
+
+ for ( ; Beg != End; ++Beg) piece->addRange(*Beg);
+ D->push_back(piece);
+ }
+
+ PD->HandlePathDiagnostic(D.take());
+}
+
+void BugReporter::EmitBasicReport(const char* name, const char* str,
+ SourceLocation Loc,
+ SourceRange* RBeg, unsigned NumRanges) {
+ EmitBasicReport(name, "", str, Loc, RBeg, NumRanges);
+}
+
+void BugReporter::EmitBasicReport(const char* name, const char* category,
+ const char* str, SourceLocation Loc,
+ SourceRange* RBeg, unsigned NumRanges) {
+
+ // 'BT' will be owned by BugReporter as soon as we call 'EmitReport'.
+ BugType *BT = new BugType(name, category);
+ FullSourceLoc L = getContext().getFullLoc(Loc);
+ RangedBugReport *R = new DiagBugReport(*BT, str, L);
+ for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
+ EmitReport(R);
+}
diff --git a/lib/Analysis/CFRefCount.cpp b/lib/Analysis/CFRefCount.cpp
new file mode 100644
index 0000000..30ff67f
--- /dev/null
+++ b/lib/Analysis/CFRefCount.cpp
@@ -0,0 +1,3635 @@
+// CFRefCount.cpp - Transfer functions for tracking simple values -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for CFRefCount, which implements
+// a reference count checker for Core Foundation (Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRSimpleVals.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Analysis/PathSensitive/GRStateTrait.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/SymbolManager.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/STLExtras.h"
+#include <ostream>
+#include <stdarg.h>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+// The "fundamental rule" for naming conventions of methods:
+// (url broken into two lines)
+// http://developer.apple.com/documentation/Cocoa/Conceptual/
+// MemoryMgmt/Tasks/MemoryManagementRules.html
+//
+// "You take ownership of an object if you create it using a method whose name
+// begins with “alloc” or “new” or contains “copy” (for example, alloc,
+// newObject, or mutableCopy), or if you send it a retain message. You are
+// responsible for relinquishing ownership of objects you own using release
+// or autorelease. Any other time you receive an object, you must
+// not release it."
+//
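+// For example (hypothetical Objective-C code, used only for illustration):
+//
+//   NSString *a = [[NSString alloc] init];    // owned ("alloc")
+//   NSString *b = [a mutableCopy];            // owned (contains "copy")
+//   NSString *c = [dict objectForKey:key];    // received, not owned
+//   [c retain];                               // now owned
+//   [a release]; [b release]; [c release];    // relinquish ownership
+//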
+
+using llvm::CStrInCStrNoCase;
+using llvm::StringsEqualNoCase;
+
+enum NamingConvention { NoConvention, CreateRule, InitRule };
+
+static inline bool isWordEnd(char ch, char prev, char next) {
+ return ch == '\0'
+ || (islower(prev) && isupper(ch)) // xxxC
+ || (isupper(prev) && isupper(ch) && islower(next)) // XXCreate
+ || !isalpha(ch);
+}
+
+static inline const char* parseWord(const char* s) {
+ char ch = *s, prev = '\0';
+ assert(ch != '\0');
+ char next = *(s+1);
+ while (!isWordEnd(ch, prev, next)) {
+ prev = ch;
+ ch = next;
+ next = *((++s)+1);
+ }
+ return s;
+}
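+
+// For example (illustrative): for "copyMachPort", parseWord() stops at the
+// lower-to-upper transition and returns a pointer to the 'M'; for
+// "CFGetRetainCount", the first call returns a pointer to the 'G' of "Get"
+// (the upper-upper-lower transition following "CF").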
+
+static NamingConvention deriveNamingConvention(Selector S) {
+ IdentifierInfo *II = S.getIdentifierInfoForSlot(0);
+
+ if (!II)
+ return NoConvention;
+
+ const char *s = II->getName();
+
+  // A method/function name may contain a prefix. We don't know whether it is
+  // there, however, until we encounter the first '_'.
+ bool InPossiblePrefix = true;
+ bool AtBeginning = true;
+ NamingConvention C = NoConvention;
+
+ while (*s != '\0') {
+ // Skip '_'.
+ if (*s == '_') {
+ if (InPossiblePrefix) {
+ InPossiblePrefix = false;
+ AtBeginning = true;
+ // Discard whatever 'convention' we
+ // had already derived since it occurs
+ // in the prefix.
+ C = NoConvention;
+ }
+ ++s;
+ continue;
+ }
+
+ // Skip numbers, ':', etc.
+ if (!isalpha(*s)) {
+ ++s;
+ continue;
+ }
+
+ const char *wordEnd = parseWord(s);
+ assert(wordEnd > s);
+ unsigned len = wordEnd - s;
+
+ switch (len) {
+ default:
+ break;
+ case 3:
+ // Methods starting with 'new' follow the create rule.
+ if (AtBeginning && StringsEqualNoCase("new", s, len))
+ C = CreateRule;
+ break;
+ case 4:
+      // Methods containing 'copy' follow the create rule.
+ if (C == NoConvention && StringsEqualNoCase("copy", s, len))
+ C = CreateRule;
+ else // Methods starting with 'init' follow the init rule.
+ if (AtBeginning && StringsEqualNoCase("init", s, len))
+ C = InitRule;
+ break;
+    case 5:
+      // Methods starting with 'alloc' follow the create rule.
+      if (AtBeginning && StringsEqualNoCase("alloc", s, len))
+ C = CreateRule;
+ break;
+ }
+
+ // If we aren't in the prefix and have a derived convention then just
+ // return it now.
+ if (!InPossiblePrefix && C != NoConvention)
+ return C;
+
+ AtBeginning = false;
+ s = wordEnd;
+ }
+
+ // We will get here if there wasn't more than one word
+ // after the prefix.
+ return C;
+}
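+
+// For example (hypothetical selectors, used only for illustration):
+//   "mutableCopyWithZone:"  -> CreateRule   (contains "copy")
+//   "newScanner"            -> CreateRule   (starts with "new")
+//   "initWithFrame:"        -> InitRule     (starts with "init")
+//   "xyz_copyInternalState" -> CreateRule   ("xyz_" is treated as a prefix)
+//   "description"           -> NoConvention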
+
+static bool followsFundamentalRule(Selector S) {
+ return deriveNamingConvention(S) == CreateRule;
+}
+
+static const ObjCMethodDecl*
+ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD, ASTContext &Context) {
+ ObjCInterfaceDecl *ID =
+ const_cast<ObjCInterfaceDecl*>(MD->getClassInterface());
+
+ return MD->isInstanceMethod()
+ ? ID->lookupInstanceMethod(Context, MD->getSelector())
+ : ID->lookupClassMethod(Context, MD->getSelector());
+}
+
+namespace {
+class VISIBILITY_HIDDEN GenericNodeBuilder {
+ GRStmtNodeBuilder<GRState> *SNB;
+ Stmt *S;
+ const void *tag;
+ GREndPathNodeBuilder<GRState> *ENB;
+public:
+ GenericNodeBuilder(GRStmtNodeBuilder<GRState> &snb, Stmt *s,
+ const void *t)
+ : SNB(&snb), S(s), tag(t), ENB(0) {}
+ GenericNodeBuilder(GREndPathNodeBuilder<GRState> &enb)
+ : SNB(0), S(0), tag(0), ENB(&enb) {}
+
+ ExplodedNode<GRState> *MakeNode(const GRState *state,
+ ExplodedNode<GRState> *Pred) {
+ if (SNB)
+ return SNB->generateNode(PostStmt(S, tag), state, Pred);
+
+ assert(ENB);
+ return ENB->generateNode(state, Pred);
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Selector creation functions.
+//===----------------------------------------------------------------------===//
+
+static inline Selector GetNullarySelector(const char* name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(0, &II);
+}
+
+static inline Selector GetUnarySelector(const char* name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(1, &II);
+}
+
+//===----------------------------------------------------------------------===//
+// Type querying functions.
+//===----------------------------------------------------------------------===//
+
+static bool hasPrefix(const char* s, const char* prefix) {
+ if (!prefix)
+ return true;
+
+ char c = *s;
+ char cP = *prefix;
+
+ while (c != '\0' && cP != '\0') {
+ if (c != cP) break;
+ c = *(++s);
+ cP = *(++prefix);
+ }
+
+ return cP == '\0';
+}
+
+static bool hasSuffix(const char* s, const char* suffix) {
+ const char* loc = strstr(s, suffix);
+ return loc && strcmp(suffix, loc) == 0;
+}
+
+static bool isRefType(QualType RetTy, const char* prefix,
+ ASTContext* Ctx = 0, const char* name = 0) {
+
+ // Recursively walk the typedef stack, allowing typedefs of reference types.
+ while (1) {
+ if (TypedefType* TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
+ const char* TDName = TD->getDecl()->getIdentifier()->getName();
+ if (hasPrefix(TDName, prefix) && hasSuffix(TDName, "Ref"))
+ return true;
+
+ RetTy = TD->getDecl()->getUnderlyingType();
+ continue;
+ }
+ break;
+ }
+
+ if (!Ctx || !name)
+ return false;
+
+ // Is the type void*?
+ const PointerType* PT = RetTy->getAsPointerType();
+ if (!(PT->getPointeeType().getUnqualifiedType() == Ctx->VoidTy))
+ return false;
+
+ // Does the name start with the prefix?
+ return hasPrefix(name, prefix);
+}
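+
+// For example (illustrative; any CF-style typedef behaves the same way):
+//
+//   typedef const struct __CFString *CFStringRef;
+//
+// a return type spelled CFStringRef satisfies isRefType(T, "CF") because the
+// typedef name begins with "CF" and ends with "Ref". When Ctx and a function
+// name are also supplied, a plain 'void*' return type is accepted as long as
+// the function name itself begins with the prefix.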
+
+//===----------------------------------------------------------------------===//
+// Primitives used for constructing summaries for function/method calls.
+//===----------------------------------------------------------------------===//
+
+/// ArgEffect is used to summarize a function/method call's effect on a
+/// particular argument.
+enum ArgEffect { Autorelease, Dealloc, DecRef, DecRefMsg, DoNothing,
+ DoNothingByRef, IncRefMsg, IncRef, MakeCollectable, MayEscape,
+ NewAutoreleasePool, SelfOwn, StopTracking };
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+static inline void Profile(const ArgEffect X, FoldingSetNodeID& ID) {
+ ID.AddInteger((unsigned) X);
+}
+};
+} // end llvm namespace
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
+
+namespace {
+
+/// RetEffect is used to summarize a function/method call's behavior with
+/// respect to its return value.
+class VISIBILITY_HIDDEN RetEffect {
+public:
+ enum Kind { NoRet, Alias, OwnedSymbol, OwnedAllocatedSymbol,
+ NotOwnedSymbol, GCNotOwnedSymbol, ReceiverAlias,
+ OwnedWhenTrackedReceiver };
+
+ enum ObjKind { CF, ObjC, AnyObj };
+
+private:
+ Kind K;
+ ObjKind O;
+ unsigned index;
+
+ RetEffect(Kind k, unsigned idx = 0) : K(k), O(AnyObj), index(idx) {}
+ RetEffect(Kind k, ObjKind o) : K(k), O(o), index(0) {}
+
+public:
+ Kind getKind() const { return K; }
+
+ ObjKind getObjKind() const { return O; }
+
+ unsigned getIndex() const {
+ assert(getKind() == Alias);
+ return index;
+ }
+
+ bool isOwned() const {
+ return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
+ K == OwnedWhenTrackedReceiver;
+ }
+
+ static RetEffect MakeOwnedWhenTrackedReceiver() {
+ return RetEffect(OwnedWhenTrackedReceiver, ObjC);
+ }
+
+ static RetEffect MakeAlias(unsigned Idx) {
+ return RetEffect(Alias, Idx);
+ }
+ static RetEffect MakeReceiverAlias() {
+ return RetEffect(ReceiverAlias);
+ }
+ static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
+ return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
+ }
+ static RetEffect MakeNotOwned(ObjKind o) {
+ return RetEffect(NotOwnedSymbol, o);
+ }
+ static RetEffect MakeGCNotOwned() {
+ return RetEffect(GCNotOwnedSymbol, ObjC);
+ }
+
+ static RetEffect MakeNoRet() {
+ return RetEffect(NoRet);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)K);
+ ID.AddInteger((unsigned)O);
+ ID.AddInteger(index);
+ }
+};
+
+
+class VISIBILITY_HIDDEN RetainSummary {
+  /// Args - a map from argument indices (starting from 0) to ArgEffects,
+  /// which may be sparsely populated; arguments with no entry in Args
+  /// use 'DefaultArgEffect'.
+ ArgEffects Args;
+
+ /// DefaultArgEffect - The default ArgEffect to apply to arguments that
+ /// do not have an entry in Args.
+ ArgEffect DefaultArgEffect;
+
+ /// Receiver - If this summary applies to an Objective-C message expression,
+ /// this is the effect applied to the state of the receiver.
+ ArgEffect Receiver;
+
+ /// Ret - The effect on the return value. Used to indicate if the
+ /// function/method call returns a new tracked symbol, returns an
+ /// alias of one of the arguments in the call, and so on.
+ RetEffect Ret;
+
+ /// EndPath - Indicates that execution of this method/function should
+ /// terminate the simulation of a path.
+ bool EndPath;
+
+public:
+ RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
+ ArgEffect ReceiverEff, bool endpath = false)
+ : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R),
+ EndPath(endpath) {}
+
+ /// getArg - Return the argument effect on the argument specified by
+ /// idx (starting from 0).
+ ArgEffect getArg(unsigned idx) const {
+ if (const ArgEffect *AE = Args.lookup(idx))
+ return *AE;
+
+ return DefaultArgEffect;
+ }
+
+ /// setDefaultArgEffect - Set the default argument effect.
+ void setDefaultArgEffect(ArgEffect E) {
+ DefaultArgEffect = E;
+ }
+
+ /// setArg - Set the argument effect on the argument specified by idx.
+ void setArgEffect(ArgEffects::Factory& AF, unsigned idx, ArgEffect E) {
+ Args = AF.Add(Args, idx, E);
+ }
+
+ /// getRetEffect - Returns the effect on the return value of the call.
+ RetEffect getRetEffect() const { return Ret; }
+
+ /// setRetEffect - Set the effect of the return value of the call.
+ void setRetEffect(RetEffect E) { Ret = E; }
+
+ /// isEndPath - Returns true if executing the given method/function should
+ /// terminate the path.
+ bool isEndPath() const { return EndPath; }
+
+ /// getReceiverEffect - Returns the effect on the receiver of the call.
+ /// This is only meaningful if the summary applies to an ObjCMessageExpr*.
+ ArgEffect getReceiverEffect() const { return Receiver; }
+
+ /// setReceiverEffect - Set the effect on the receiver of the call.
+ void setReceiverEffect(ArgEffect E) { Receiver = E; }
+
+ typedef ArgEffects::iterator ExprIterator;
+
+ ExprIterator begin_args() const { return Args.begin(); }
+ ExprIterator end_args() const { return Args.end(); }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, ArgEffects A,
+ RetEffect RetEff, ArgEffect DefaultEff,
+ ArgEffect ReceiverEff, bool EndPath) {
+ ID.Add(A);
+ ID.Add(RetEff);
+ ID.AddInteger((unsigned) DefaultEff);
+ ID.AddInteger((unsigned) ReceiverEff);
+ ID.AddInteger((unsigned) EndPath);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Profile(ID, Args, Ret, DefaultArgEffect, Receiver, EndPath);
+ }
+};
+} // end anonymous namespace
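+
+// For example (illustrative, referring to summaries constructed further
+// below): the 'cfretain' summary built in getUnarySummary() maps argument 0
+// to IncRef and returns RetEffect::MakeAlias(0), while the 'cfrelease'
+// summary maps argument 0 to DecRef and returns RetEffect::MakeNoRet();
+// both use DoNothing for the receiver and default argument effects.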
+
+//===----------------------------------------------------------------------===//
+// Data structures for constructing summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN ObjCSummaryKey {
+ IdentifierInfo* II;
+ Selector S;
+public:
+ ObjCSummaryKey(IdentifierInfo* ii, Selector s)
+ : II(ii), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl* d, Selector s)
+ : II(d ? d->getIdentifier() : 0), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl* d, IdentifierInfo *ii, Selector s)
+ : II(d ? d->getIdentifier() : ii), S(s) {}
+
+ ObjCSummaryKey(Selector s)
+ : II(0), S(s) {}
+
+ IdentifierInfo* getIdentifier() const { return II; }
+ Selector getSelector() const { return S; }
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+ static inline ObjCSummaryKey getEmptyKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+ DenseMapInfo<Selector>::getEmptyKey());
+ }
+
+ static inline ObjCSummaryKey getTombstoneKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+ DenseMapInfo<Selector>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const ObjCSummaryKey &V) {
+ return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
+ & 0x88888888)
+ | (DenseMapInfo<Selector>::getHashValue(V.getSelector())
+ & 0x55555555);
+ }
+
+ static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+ return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
+ RHS.getIdentifier()) &&
+ DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
+ RHS.getSelector());
+ }
+
+ static bool isPod() {
+ return DenseMapInfo<ObjCInterfaceDecl*>::isPod() &&
+ DenseMapInfo<Selector>::isPod();
+ }
+};
+} // end llvm namespace
+
+namespace {
+class VISIBILITY_HIDDEN ObjCSummaryCache {
+ typedef llvm::DenseMap<ObjCSummaryKey, RetainSummary*> MapTy;
+ MapTy M;
+public:
+ ObjCSummaryCache() {}
+
+ typedef MapTy::iterator iterator;
+
+ iterator find(const ObjCInterfaceDecl* D, IdentifierInfo *ClsName,
+ Selector S) {
+ // Lookup the method using the decl for the class @interface. If we
+ // have no decl, lookup using the class name.
+ return D ? find(D, S) : find(ClsName, S);
+ }
+
+ iterator find(const ObjCInterfaceDecl* D, Selector S) {
+ // Do a lookup with the (D,S) pair. If we find a match return
+ // the iterator.
+ ObjCSummaryKey K(D, S);
+ MapTy::iterator I = M.find(K);
+
+ if (I != M.end() || !D)
+ return I;
+
+ // Walk the super chain. If we find a hit with a parent, we'll end
+ // up returning that summary. We actually allow that key (null,S), as
+ // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+ // generate initial summaries without having to worry about NSObject
+ // being declared.
+ // FIXME: We may change this at some point.
+ for (ObjCInterfaceDecl* C=D->getSuperClass() ;; C=C->getSuperClass()) {
+ if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+ break;
+
+ if (!C)
+ return I;
+ }
+
+ // Cache the summary with original key to make the next lookup faster
+ // and return the iterator.
+ M[K] = I->second;
+ return I;
+ }
+
+
+ iterator find(Expr* Receiver, Selector S) {
+ return find(getReceiverDecl(Receiver), S);
+ }
+
+ iterator find(IdentifierInfo* II, Selector S) {
+    // FIXME: Class method lookup. Right now we don't have a good way
+ // of going between IdentifierInfo* and the class hierarchy.
+ iterator I = M.find(ObjCSummaryKey(II, S));
+ return I == M.end() ? M.find(ObjCSummaryKey(S)) : I;
+ }
+
+ ObjCInterfaceDecl* getReceiverDecl(Expr* E) {
+
+ const PointerType* PT = E->getType()->getAsPointerType();
+ if (!PT) return 0;
+
+ ObjCInterfaceType* OI = dyn_cast<ObjCInterfaceType>(PT->getPointeeType());
+ if (!OI) return 0;
+
+    return OI->getDecl();
+ }
+
+ iterator end() { return M.end(); }
+
+ RetainSummary*& operator[](ObjCMessageExpr* ME) {
+
+ Selector S = ME->getSelector();
+
+ if (Expr* Receiver = ME->getReceiver()) {
+ ObjCInterfaceDecl* OD = getReceiverDecl(Receiver);
+ return OD ? M[ObjCSummaryKey(OD->getIdentifier(), S)] : M[S];
+ }
+
+ return M[ObjCSummaryKey(ME->getClassName(), S)];
+ }
+
+ RetainSummary*& operator[](ObjCSummaryKey K) {
+ return M[K];
+ }
+
+ RetainSummary*& operator[](Selector S) {
+ return M[ ObjCSummaryKey(S) ];
+ }
+};
+} // end anonymous namespace
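+
+// For example (hypothetical class hierarchy): if only (NSObject, "retain")
+// has a cached summary, a find() for (NSMutableString, "retain") walks the
+// super chain NSMutableString -> NSString -> NSObject, returns the NSObject
+// entry, and re-caches it under the (NSMutableString, "retain") key so the
+// next lookup hits directly.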
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN RetainSummaryManager {
+
+ //==-----------------------------------------------------------------==//
+ // Typedefs.
+ //==-----------------------------------------------------------------==//
+
+ typedef llvm::DenseMap<FunctionDecl*, RetainSummary*>
+ FuncSummariesTy;
+
+ typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+ //==-----------------------------------------------------------------==//
+ // Data.
+ //==-----------------------------------------------------------------==//
+
+ /// Ctx - The ASTContext object for the analyzed ASTs.
+ ASTContext& Ctx;
+
+  /// CFDictionaryCreateII - An IdentifierInfo* representing the identifier
+ /// "CFDictionaryCreate".
+ IdentifierInfo* CFDictionaryCreateII;
+
+ /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+ const bool GCEnabled;
+
+ /// FuncSummaries - A map from FunctionDecls to summaries.
+ FuncSummariesTy FuncSummaries;
+
+ /// ObjCClassMethodSummaries - A map from selectors (for instance methods)
+ /// to summaries.
+ ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+ /// ObjCMethodSummaries - A map from selectors to summaries.
+ ObjCMethodSummariesTy ObjCMethodSummaries;
+
+ /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+ /// and all other data used by the checker.
+ llvm::BumpPtrAllocator BPAlloc;
+
+ /// AF - A factory for ArgEffects objects.
+ ArgEffects::Factory AF;
+
+  /// ScratchArgs - A holding buffer for constructing ArgEffects.
+ ArgEffects ScratchArgs;
+
+ /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+ /// objects.
+ RetEffect ObjCAllocRetE;
+
+ RetainSummary DefaultSummary;
+ RetainSummary* StopSummary;
+
+ //==-----------------------------------------------------------------==//
+ // Methods.
+ //==-----------------------------------------------------------------==//
+
+ /// getArgEffects - Returns a persistent ArgEffects object based on the
+ /// data in ScratchArgs.
+ ArgEffects getArgEffects();
+
+ enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+
+public:
+ RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+ RetainSummary *getDefaultSummary() {
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ return new (Summ) RetainSummary(DefaultSummary);
+ }
+
+ RetainSummary* getUnarySummary(const FunctionType* FT, UnaryFuncKind func);
+
+ RetainSummary* getCFSummaryCreateRule(FunctionDecl* FD);
+ RetainSummary* getCFSummaryGetRule(FunctionDecl* FD);
+ RetainSummary* getCFCreateGetRuleSummary(FunctionDecl* FD, const char* FName);
+
+ RetainSummary* getPersistentSummary(ArgEffects AE, RetEffect RetEff,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape,
+ bool isEndPath = false);
+
+ RetainSummary* getPersistentSummary(RetEffect RE,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape) {
+ return getPersistentSummary(getArgEffects(), RE, ReceiverEff, DefaultEff);
+ }
+
+ RetainSummary *getPersistentStopSummary() {
+ if (StopSummary)
+ return StopSummary;
+
+ StopSummary = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking, StopTracking);
+
+ return StopSummary;
+ }
+
+ RetainSummary *getInitMethodSummary(QualType RetTy);
+
+ void InitializeClassMethodSummaries();
+ void InitializeMethodSummaries();
+
+ bool isTrackedObjCObjectType(QualType T);
+ bool isTrackedCFObjectType(QualType T);
+
+private:
+
+ void addClsMethSummary(IdentifierInfo* ClsII, Selector S,
+ RetainSummary* Summ) {
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addNSObjectClsMethSummary(Selector S, RetainSummary *Summ) {
+ ObjCClassMethodSummaries[S] = Summ;
+ }
+
+ void addNSObjectMethSummary(Selector S, RetainSummary *Summ) {
+ ObjCMethodSummaries[S] = Summ;
+ }
+
+ void addClassMethSummary(const char* Cls, const char* nullaryName,
+ RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const char* nullaryName,
+ RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ Selector generateSelector(va_list argp) {
+ llvm::SmallVector<IdentifierInfo*, 10> II;
+
+ while (const char* s = va_arg(argp, const char*))
+ II.push_back(&Ctx.Idents.get(s));
+
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
+ RetainSummary* Summ, va_list argp) {
+ Selector S = generateSelector(argp);
+ Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(const char* Cls, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(IdentifierInfo *II, RetainSummary* Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addPanicSummary(const char* Cls, ...) {
+ RetainSummary* Summ = getPersistentSummary(AF.GetEmptyMap(),
+ RetEffect::MakeNoRet(),
+ DoNothing, DoNothing, true);
+ va_list argp;
+ va_start (argp, Cls);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+public:
+
+ RetainSummaryManager(ASTContext& ctx, bool gcenabled)
+ : Ctx(ctx),
+ CFDictionaryCreateII(&ctx.Idents.get("CFDictionaryCreate")),
+ GCEnabled(gcenabled), AF(BPAlloc), ScratchArgs(AF.GetEmptyMap()),
+ ObjCAllocRetE(gcenabled ? RetEffect::MakeGCNotOwned()
+ : RetEffect::MakeOwned(RetEffect::ObjC, true)),
+ DefaultSummary(AF.GetEmptyMap() /* per-argument effects (none) */,
+ RetEffect::MakeNoRet() /* return effect */,
+ MayEscape, /* default argument effect */
+ DoNothing /* receiver effect */),
+ StopSummary(0) {
+
+ InitializeClassMethodSummaries();
+ InitializeMethodSummaries();
+ }
+
+ ~RetainSummaryManager();
+
+ RetainSummary* getSummary(FunctionDecl* FD);
+
+ RetainSummary* getInstanceMethodSummary(ObjCMessageExpr* ME,
+ const ObjCInterfaceDecl* ID) {
+ return getInstanceMethodSummary(ME->getSelector(), ME->getClassName(),
+ ID, ME->getMethodDecl(), ME->getType());
+ }
+
+ RetainSummary* getInstanceMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl* ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy);
+
+ RetainSummary *getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy);
+
+ RetainSummary *getClassMethodSummary(ObjCMessageExpr *ME) {
+ return getClassMethodSummary(ME->getSelector(), ME->getClassName(),
+ ME->getClassInfo().first,
+ ME->getMethodDecl(), ME->getType());
+ }
+
+ /// getMethodSummary - This version of getMethodSummary is used to query
+ /// the summary for the current method being analyzed.
+ RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
+ // FIXME: Eventually this should be unneeded.
+ const ObjCInterfaceDecl *ID = MD->getClassInterface();
+ Selector S = MD->getSelector();
+ IdentifierInfo *ClsName = ID->getIdentifier();
+ QualType ResultTy = MD->getResultType();
+
+ // Resolve the method decl last.
+ if (const ObjCMethodDecl *InterfaceMD =
+ ResolveToInterfaceMethodDecl(MD, Ctx))
+ MD = InterfaceMD;
+
+ if (MD->isInstanceMethod())
+ return getInstanceMethodSummary(S, ClsName, ID, MD, ResultTy);
+ else
+ return getClassMethodSummary(S, ClsName, ID, MD, ResultTy);
+ }
+
+ RetainSummary* getCommonMethodSummary(const ObjCMethodDecl* MD,
+ Selector S, QualType RetTy);
+
+ void updateSummaryFromAnnotations(RetainSummary &Summ,
+ const ObjCMethodDecl *MD);
+
+ void updateSummaryFromAnnotations(RetainSummary &Summ,
+ const FunctionDecl *FD);
+
+ bool isGCEnabled() const { return GCEnabled; }
+
+ RetainSummary *copySummary(RetainSummary *OldSumm) {
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(*OldSumm);
+ return Summ;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of checker data structures.
+//===----------------------------------------------------------------------===//
+
+RetainSummaryManager::~RetainSummaryManager() {}
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+ ArgEffects AE = ScratchArgs;
+ ScratchArgs = AF.GetEmptyMap();
+ return AE;
+}
+
+RetainSummary*
+RetainSummaryManager::getPersistentSummary(ArgEffects AE, RetEffect RetEff,
+ ArgEffect ReceiverEff,
+ ArgEffect DefaultEff,
+ bool isEndPath) {
+ // Create the summary and return it.
+ RetainSummary *Summ = (RetainSummary*) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(AE, RetEff, DefaultEff, ReceiverEff, isEndPath);
+ return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Predicates.
+//===----------------------------------------------------------------------===//
+
+bool RetainSummaryManager::isTrackedObjCObjectType(QualType Ty) {
+ if (!Ctx.isObjCObjectPointerType(Ty))
+ return false;
+
+ // We assume that id<..>, id, and "Class" all represent tracked objects.
+ const PointerType *PT = Ty->getAsPointerType();
+ if (PT == 0)
+ return true;
+
+ const ObjCInterfaceType *OT = PT->getPointeeType()->getAsObjCInterfaceType();
+
+ // We assume that id<..>, id, and "Class" all represent tracked objects.
+ if (!OT)
+ return true;
+
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ ObjCInterfaceDecl* ID = OT->getDecl();
+
+ // Assume that anything declared with a forward declaration and no
+ // @interface subclasses NSObject.
+ if (ID->isForwardDecl())
+ return true;
+
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+
+
+ for ( ; ID ; ID = ID->getSuperClass())
+ if (ID->getIdentifier() == NSObjectII)
+ return true;
+
+ return false;
+}
+
+bool RetainSummaryManager::isTrackedCFObjectType(QualType T) {
+ return isRefType(T, "CF") || // Core Foundation.
+ isRefType(T, "CG") || // Core Graphics.
+ isRefType(T, "DADisk") || // Disk Arbitration API.
+ isRefType(T, "DADissenter") ||
+ isRefType(T, "DASessionRef");
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(FunctionDecl* FD, const char* FName) {
+ const char* loc = strstr(FName, "Retain");
+ return loc && loc[sizeof("Retain")-1] == '\0';
+}
+
+static bool isRelease(FunctionDecl* FD, const char* FName) {
+ const char* loc = strstr(FName, "Release");
+ return loc && loc[sizeof("Release")-1] == '\0';
+}
+
+RetainSummary* RetainSummaryManager::getSummary(FunctionDecl* FD) {
+ // Look up a summary in our cache of FunctionDecls -> Summaries.
+ FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+ if (I != FuncSummaries.end())
+ return I->second;
+
+ // No summary? Generate one.
+ RetainSummary *S = 0;
+
+ do {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit()) {
+ S = getPersistentStopSummary();
+ break;
+ }
+
+ // [PR 3337] Use 'getAsFunctionType' to strip away any typedefs on the
+ // function's type.
+ const FunctionType* FT = FD->getType()->getAsFunctionType();
+ const char* FName = FD->getIdentifier()->getName();
+
+    // Strip away preceding '_'. Doing this here will affect all the checks
+ // down below.
+ while (*FName == '_') ++FName;
+
+ // Inspect the result type.
+ QualType RetTy = FT->getResultType();
+
+ // FIXME: This should all be refactored into a chain of "summary lookup"
+ // filters.
+ if (strcmp(FName, "IOServiceGetMatchingServices") == 0) {
+ // FIXES: <rdar://problem/6326900>
+ // This should be addressed using a API table. This strcmp is also
+ // a little gross, but there is no need to super optimize here.
+ assert (ScratchArgs.isEmpty());
+ ScratchArgs = AF.Add(ScratchArgs, 1, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ break;
+ }
+
+ // Enable this code once the semantics of NSDeallocateObject are resolved
+ // for GC. <rdar://problem/6619988>
+#if 0
+ // Handle: NSDeallocateObject(id anObject);
+ // This method does allow 'nil' (although we don't check it now).
+ if (strcmp(FName, "NSDeallocateObject") == 0) {
+ return RetTy == Ctx.VoidTy
+ ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
+ : getPersistentStopSummary();
+ }
+#endif
+
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ if (strcmp(FName, "NSMakeCollectable") == 0) {
+ S = (RetTy == Ctx.getObjCIdType())
+ ? getUnarySummary(FT, cfmakecollectable)
+ : getPersistentStopSummary();
+
+ break;
+ }
+
+ if (RetTy->isPointerType()) {
+ // For CoreFoundation ('CF') types.
+ if (isRefType(RetTy, "CF", &Ctx, FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else if (strstr(FName, "MakeCollectable"))
+ S = getUnarySummary(FT, cfmakecollectable);
+ else
+ S = getCFCreateGetRuleSummary(FD, FName);
+
+ break;
+ }
+
+ // For CoreGraphics ('CG') types.
+ if (isRefType(RetTy, "CG", &Ctx, FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else
+ S = getCFCreateGetRuleSummary(FD, FName);
+
+ break;
+ }
+
+ // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+ if (isRefType(RetTy, "DADisk") ||
+ isRefType(RetTy, "DADissenter") ||
+ isRefType(RetTy, "DASessionRef")) {
+ S = getCFCreateGetRuleSummary(FD, FName);
+ break;
+ }
+
+ break;
+ }
+
+ // Check for release functions, the only kind of functions that we care
+ // about that don't return a pointer type.
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+ // Test for 'CGCF'.
+ if (FName[1] == 'G' && FName[2] == 'C' && FName[3] == 'F')
+ FName += 4;
+ else
+ FName += 2;
+
+ if (isRelease(FD, FName))
+ S = getUnarySummary(FT, cfrelease);
+ else {
+ assert (ScratchArgs.isEmpty());
+ // Remaining CoreFoundation and CoreGraphics functions.
+ // We used to assume that they all strictly followed the ownership idiom
+ // and that ownership cannot be transferred. While this is technically
+ // correct, many methods allow a tracked object to escape. For example:
+ //
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFDictionaryAddValue(y, key, x);
+ // CFRelease(x);
+ // ... it is okay to use 'x' since 'y' has a reference to it
+ //
+ // We handle this and similar cases with the following heuristic. If the
+ // function name contains "InsertValue", "SetValue" or "AddValue" then
+ // we assume that arguments may "escape."
+ //
+ ArgEffect E = (CStrInCStrNoCase(FName, "InsertValue") ||
+ CStrInCStrNoCase(FName, "AddValue") ||
+ CStrInCStrNoCase(FName, "SetValue") ||
+ CStrInCStrNoCase(FName, "AppendValue"))
+ ? MayEscape : DoNothing;
+
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+ }
+ }
+ }
+ while (0);
+
+ if (!S)
+ S = getDefaultSummary();
+
+ // Annotations override defaults.
+ assert(S);
+ updateSummaryFromAnnotations(*S, FD);
+
+ FuncSummaries[FD] = S;
+ return S;
+}
+
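+// The Create/Get rule implemented below mirrors the Core Foundation
+// ownership conventions: callers own what "Create" and "Copy" functions
+// return and must eventually release it, while "Get" functions return a
+// reference the caller does not own. Illustrative examples using standard
+// CF APIs:
+//
+//   CFStringRef s = CFStringCreateCopy(NULL, other);  // +1, caller owns
+//   CFTypeRef v = CFDictionaryGetValue(dict, key);    // +0, not owned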
+RetainSummary*
+RetainSummaryManager::getCFCreateGetRuleSummary(FunctionDecl* FD,
+ const char* FName) {
+
+ if (strstr(FName, "Create") || strstr(FName, "Copy"))
+ return getCFSummaryCreateRule(FD);
+
+ if (strstr(FName, "Get"))
+ return getCFSummaryGetRule(FD);
+
+ return getDefaultSummary();
+}
+
+RetainSummary*
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func) {
+
+ // Sanity check that this is *really* a unary function. A mismatch can
+ // happen if people do weird things.
+ const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+ if (!FTP || FTP->getNumArgs() != 1)
+ return getPersistentStopSummary();
+
+ assert (ScratchArgs.isEmpty());
+
+ switch (func) {
+ case cfretain: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, IncRef);
+ return getPersistentSummary(RetEffect::MakeAlias(0),
+ DoNothing, DoNothing);
+ }
+
+ case cfrelease: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, DecRef);
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, DoNothing);
+ }
+
+ case cfmakecollectable: {
+ ScratchArgs = AF.Add(ScratchArgs, 0, MakeCollectable);
+ return getPersistentSummary(RetEffect::MakeAlias(0),DoNothing, DoNothing);
+ }
+
+ default:
+ assert (false && "Not a supported unary function.");
+ return getDefaultSummary();
+ }
+}
+
+RetainSummary* RetainSummaryManager::getCFSummaryCreateRule(FunctionDecl* FD) {
+ assert (ScratchArgs.isEmpty());
+
+ if (FD->getIdentifier() == CFDictionaryCreateII) {
+ ScratchArgs = AF.Add(ScratchArgs, 1, DoNothingByRef);
+ ScratchArgs = AF.Add(ScratchArgs, 2, DoNothingByRef);
+ }
+
+ return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+RetainSummary* RetainSummaryManager::getCFSummaryGetRule(FunctionDecl* FD) {
+ assert (ScratchArgs.isEmpty());
+ return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+ DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+RetainSummary*
+RetainSummaryManager::getInitMethodSummary(QualType RetTy) {
+ assert(ScratchArgs.isEmpty());
+ // 'init' methods conceptually return a newly allocated object and claim
+ // the receiver.
+ if (isTrackedObjCObjectType(RetTy) || isTrackedCFObjectType(RetTy))
+ return getPersistentSummary(RetEffect::MakeOwnedWhenTrackedReceiver(),
+ DecRefMsg);
+
+ return getDefaultSummary();
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+ const FunctionDecl *FD) {
+ if (!FD)
+ return;
+
+ // Determine if there is a special return effect for this method.
+ if (isTrackedObjCObjectType(FD->getResultType())) {
+ if (FD->getAttr<NSReturnsRetainedAttr>()) {
+ Summ.setRetEffect(ObjCAllocRetE);
+ }
+ else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ }
+}
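+
+// A source-level sketch of the annotations consulted above (assuming the GNU
+// attribute spellings corresponding to NSReturnsRetainedAttr and
+// CFReturnsRetainedAttr):
+//
+//   __attribute__((ns_returns_retained)) NSString *MakeString();
+//   __attribute__((cf_returns_retained)) CFStringRef MakeCFString();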
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
+ const ObjCMethodDecl *MD) {
+ if (!MD)
+ return;
+
+ // Determine if there is a special return effect for this method.
+ if (isTrackedObjCObjectType(MD->getResultType())) {
+ if (MD->getAttr<NSReturnsRetainedAttr>()) {
+ Summ.setRetEffect(ObjCAllocRetE);
+ }
+ else if (MD->getAttr<CFReturnsRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ }
+}
+
+RetainSummary*
+RetainSummaryManager::getCommonMethodSummary(const ObjCMethodDecl* MD,
+ Selector S, QualType RetTy) {
+
+ if (MD) {
+ // Scan the method decl for 'void*' arguments. These should be treated
+ // as 'StopTracking' because they are often used with delegates.
+ // Delegates are a frequent source of false positives for the retain
+ // count checker.
+ unsigned i = 0;
+ for (ObjCMethodDecl::param_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I, ++i)
+ if (ParmVarDecl *PD = *I) {
+ QualType Ty = Ctx.getCanonicalType(PD->getType());
+ if (Ty.getUnqualifiedType() == Ctx.VoidPtrTy)
+ ScratchArgs = AF.Add(ScratchArgs, i, StopTracking);
+ }
+ }
+
+ // Any special effect for the receiver?
+ ArgEffect ReceiverEff = DoNothing;
+
+ // If one of the arguments in the selector has the keyword 'delegate' we
+ // should stop tracking the reference count for the receiver. This is
+ // because the reference count is quite possibly handled by a delegate
+ // method.
+ if (S.isKeywordSelector()) {
+ const std::string &str = S.getAsString();
+ assert(!str.empty());
+ if (CStrInCStrNoCase(&str[0], "delegate:")) ReceiverEff = StopTracking;
+ }
+
+ // Look for methods that return an owned object.
+ if (isTrackedObjCObjectType(RetTy)) {
+ // EXPERIMENTAL: Assume the Cocoa conventions for all objects returned
+ // by instance methods.
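+ // For example, under those conventions a selector such as 'mutableCopy'
+ // or 'newController' yields a +1 (owned) object, while 'objectAtIndex:'
+ // yields a +0 (not owned) object. (Illustrative selectors only.)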
+ RetEffect E = followsFundamentalRule(S)
+ ? ObjCAllocRetE : RetEffect::MakeNotOwned(RetEffect::ObjC);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
+ }
+
+ // Look for methods that return an owned core foundation object.
+ if (isTrackedCFObjectType(RetTy)) {
+ RetEffect E = followsFundamentalRule(S)
+ ? RetEffect::MakeOwned(RetEffect::CF, true)
+ : RetEffect::MakeNotOwned(RetEffect::CF);
+
+ return getPersistentSummary(E, ReceiverEff, MayEscape);
+ }
+
+ if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing)
+ return getDefaultSummary();
+
+ return getPersistentSummary(RetEffect::MakeNoRet(), ReceiverEff, MayEscape);
+}
+
+RetainSummary*
+RetainSummaryManager::getInstanceMethodSummary(Selector S,
+ IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl* ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy) {
+
+ // Look up a summary in our summary cache.
+ ObjCMethodSummariesTy::iterator I = ObjCMethodSummaries.find(ID, ClsName, S);
+
+ if (I != ObjCMethodSummaries.end())
+ return I->second;
+
+ assert(ScratchArgs.isEmpty());
+ RetainSummary *Summ = 0;
+
+ // "initXXX": pass-through for receiver.
+ if (deriveNamingConvention(S) == InitRule)
+ Summ = getInitMethodSummary(RetTy);
+ else
+ Summ = getCommonMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+
+ // Memoize the summary.
+ ObjCMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ return Summ;
+}
+
+RetainSummary*
+RetainSummaryManager::getClassMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy) {
+
+ assert(ClsName && "Class name must be specified.");
+ ObjCMethodSummariesTy::iterator I =
+ ObjCClassMethodSummaries.find(ID, ClsName, S);
+
+ if (I != ObjCClassMethodSummaries.end())
+ return I->second;
+
+ RetainSummary *Summ = getCommonMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(*Summ, MD);
+
+ // Memoize the summary.
+ ObjCClassMethodSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+ assert(ScratchArgs.isEmpty());
+ RetainSummary* Summ = getPersistentSummary(ObjCAllocRetE);
+
+ // Create the summaries for "alloc", "new", and "allocWithZone:" for
+ // NSObject and its derivatives.
+ addNSObjectClsMethSummary(GetNullarySelector("alloc", Ctx), Summ);
+ addNSObjectClsMethSummary(GetNullarySelector("new", Ctx), Summ);
+ addNSObjectClsMethSummary(GetUnarySelector("allocWithZone", Ctx), Summ);
+
+ // Create the [NSAssertionHandler currentHandler] summary.
+ addClsMethSummary(&Ctx.Idents.get("NSAssertionHandler"),
+ GetNullarySelector("currentHandler", Ctx),
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+ // Create the [NSAutoreleasePool addObject:] summary.
+ ScratchArgs = AF.Add(ScratchArgs, 0, Autorelease);
+ addClsMethSummary(&Ctx.Idents.get("NSAutoreleasePool"),
+ GetUnarySelector("addObject", Ctx),
+ getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, Autorelease));
+
+ // Create the summaries for [NSObject performSelector...]. We treat
+ // these as 'stop tracking' for the arguments because they are often
+ // used for delegates that can release the object. When we have better
+ // inter-procedural analysis we can potentially do something better. This
+ // workaround exists to remove false positives.
+ Summ = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking);
+ IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", "inModes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
+ "withObject", NULL);
+
+ // Specially handle NSData.
+ RetainSummary *dataWithBytesNoCopySumm =
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC), DoNothing,
+ DoNothing);
+ addClsMethSummary("NSData", dataWithBytesNoCopySumm,
+ "dataWithBytesNoCopy", "length", NULL);
+ addClsMethSummary("NSData", dataWithBytesNoCopySumm,
+ "dataWithBytesNoCopy", "length", "freeWhenDone", NULL);
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+ assert (ScratchArgs.isEmpty());
+
+ // Create the "init" selector. It just acts as a pass-through for the
+ // receiver.
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx),
+ getPersistentSummary(RetEffect::MakeOwnedWhenTrackedReceiver(),
+ DecRefMsg));
+
+ // The next methods are allocators.
+ RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+
+ // Create the "copy" selector.
+ addNSObjectMethSummary(GetNullarySelector("copy", Ctx), AllocSumm);
+
+ // Create the "mutableCopy" selector.
+ addNSObjectMethSummary(GetNullarySelector("mutableCopy", Ctx), AllocSumm);
+
+ // Create the "retain" selector.
+ RetEffect E = RetEffect::MakeReceiverAlias();
+ RetainSummary *Summ = getPersistentSummary(E, IncRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+ // Create the "release" selector.
+ Summ = getPersistentSummary(E, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+ // Create the "drain" selector.
+ Summ = getPersistentSummary(E, isGCEnabled() ? DoNothing : DecRef);
+ addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
+
+ // Create the -dealloc summary.
+ Summ = getPersistentSummary(RetEffect::MakeNoRet(), Dealloc);
+ addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+ // Create the "autorelease" selector.
+ Summ = getPersistentSummary(E, Autorelease);
+ addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+ // Specially handle NSAutoreleasePool.
+ addInstMethSummary("NSAutoreleasePool", "init",
+ getPersistentSummary(RetEffect::MakeReceiverAlias(),
+ NewAutoreleasePool));
+
+ // For NSWindow, allocated objects are (initially) self-owned.
+ // FIXME: For now we opt for false negatives with NSWindow, as these objects
+ // self-own themselves. However, they only do this once they are displayed.
+ // Thus, we need to track an NSWindow's display status.
+ // This is tracked in <rdar://problem/6062711>.
+ // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+ RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking,
+ StopTracking);
+
+ addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // For NSPanel (which subclasses NSWindow), allocated objects are not
+ // self-owned.
+ // FIXME: For now we don't track NSPanel objects for the same reason
+ // as NSWindow objects.
+ addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // Don't track allocated autorelease pools yet, as it is okay to prematurely
+ // exit a method.
+ addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+
+ // Create NSAssertionHandler summaries.
+ addPanicSummary("NSAssertionHandler", "handleFailureInFunction", "file",
+ "lineNumber", "description", NULL);
+
+ addPanicSummary("NSAssertionHandler", "handleFailureInMethod", "object",
+ "file", "lineNumber", "description", NULL);
+
+ // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:.
+ addInstMethSummary("QCRenderer", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+ addInstMethSummary("QCView", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+
+ // Create summaries for CIContext, 'createCGImage'.
+ addInstMethSummary("CIContext", AllocSumm,
+ "createCGImage", "fromRect", NULL);
+ addInstMethSummary("CIContext", AllocSumm,
+ "createCGImage", "fromRect", "format", "colorSpace", NULL);
+}
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN RefVal {
+public:
+ enum Kind {
+ Owned = 0, // Owning reference.
+ NotOwned, // Reference is not owned but still valid (not freed).
+ Released, // Object has been released.
+ ReturnedOwned, // Returned object passes ownership to caller.
+ ReturnedNotOwned, // Returned object does not pass ownership to caller.
+ ERROR_START,
+ ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+ ErrorDeallocGC, // Calling -dealloc with GC enabled.
+ ErrorUseAfterRelease, // Object used after released.
+ ErrorReleaseNotOwned, // Release of an object that was not owned.
+ ERROR_LEAK_START,
+ ErrorLeak, // A memory leak due to excessive reference counts.
+ ErrorLeakReturned, // A memory leak due to the returning method not following
+ // the correct naming conventions.
+ ErrorGCLeakReturned,
+ ErrorOverAutorelease,
+ ErrorReturnedNotOwned
+ };
+
+private:
+ Kind kind;
+ RetEffect::ObjKind okind;
+ unsigned Cnt;
+ unsigned ACnt;
+ QualType T;
+
+ RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
+ : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+
+ RefVal(Kind k, unsigned cnt = 0)
+ : kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
+
+public:
+ Kind getKind() const { return kind; }
+
+ RetEffect::ObjKind getObjKind() const { return okind; }
+
+ unsigned getCount() const { return Cnt; }
+ unsigned getAutoreleaseCount() const { return ACnt; }
+ unsigned getCombinedCounts() const { return Cnt + ACnt; }
+ void clearCounts() { Cnt = 0; ACnt = 0; }
+ void setCount(unsigned i) { Cnt = i; }
+ void setAutoreleaseCount(unsigned i) { ACnt = i; }
+
+ QualType getType() const { return T; }
+
+ // Useful predicates.
+
+ static bool isError(Kind k) { return k >= ERROR_START; }
+
+ static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
+
+ bool isOwned() const {
+ return getKind() == Owned;
+ }
+
+ bool isNotOwned() const {
+ return getKind() == NotOwned;
+ }
+
+ bool isReturnedOwned() const {
+ return getKind() == ReturnedOwned;
+ }
+
+ bool isReturnedNotOwned() const {
+ return getKind() == ReturnedNotOwned;
+ }
+
+ bool isNonLeakError() const {
+ Kind k = getKind();
+ return isError(k) && !isLeak(k);
+ }
+
+ static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 1) {
+ return RefVal(Owned, o, Count, 0, t);
+ }
+
+ static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 0) {
+ return RefVal(NotOwned, o, Count, 0, t);
+ }
+
+ // Comparison, profiling, and pretty-printing.
+
+ bool operator==(const RefVal& X) const {
+ return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
+ }
+
+ RefVal operator-(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() - i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator+(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() + i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator^(Kind k) const {
+ return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+ getType());
+ }
+
+ RefVal autorelease() const {
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+ getType());
+ }
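+
+ // Illustrative use of the operators above (hypothetical snippet; the
+ // transfer functions later in this file build adjusted copies this way
+ // rather than mutating a RefVal in place):
+ //
+ //   RefVal V = RefVal::makeOwned(RetEffect::CF, T);
+ //   V = V + 1;            // retain: bump the count
+ //   V = V ^ Released;     // change the typestate, keep the counts
+ //   V = V.autorelease();  // record a pending -autorelease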
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) kind);
+ ID.AddInteger(Cnt);
+ ID.AddInteger(ACnt);
+ ID.Add(T);
+ }
+
+ void print(std::ostream& Out) const;
+};
+
+void RefVal::print(std::ostream& Out) const {
+ if (!T.isNull())
+ Out << "Tracked Type:" << T.getAsString() << '\n';
+
+ switch (getKind()) {
+ default: assert(false);
+ case Owned: {
+ Out << "Owned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case NotOwned: {
+ Out << "NotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedOwned: {
+ Out << "ReturnedOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedNotOwned: {
+ Out << "ReturnedNotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case Released:
+ Out << "Released";
+ break;
+
+ case ErrorDeallocGC:
+ Out << "-dealloc (GC)";
+ break;
+
+ case ErrorDeallocNotOwned:
+ Out << "-dealloc (not-owned)";
+ break;
+
+ case ErrorLeak:
+ Out << "Leaked";
+ break;
+
+ case ErrorLeakReturned:
+ Out << "Leaked (Bad naming)";
+ break;
+
+ case ErrorGCLeakReturned:
+ Out << "Leaked (GC-ed at return)";
+ break;
+
+ case ErrorUseAfterRelease:
+ Out << "Use-After-Release [ERROR]";
+ break;
+
+ case ErrorReleaseNotOwned:
+ Out << "Release of Not-Owned [ERROR]";
+ break;
+
+ case RefVal::ErrorOverAutorelease:
+ Out << "Over autoreleased";
+ break;
+
+ case RefVal::ErrorReturnedNotOwned:
+ Out << "Non-owned object returned instead of owned";
+ break;
+ }
+
+ if (ACnt) {
+ Out << " [ARC +" << ACnt << ']';
+ }
+}
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
+static int RefBIndex = 0;
+
+namespace clang {
+ template<>
+ struct GRStateTrait<RefBindings> : public GRStatePartialTrait<RefBindings> {
+ static inline void* GDMIndex() { return &RefBIndex; }
+ };
+}
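+
+// With the trait registered, the per-symbol RefVal bindings live in the
+// state's generic data map and are accessed as in the transfer functions
+// below, e.g.:
+//
+//   if (const RefVal* T = state.get<RefBindings>(Sym))
+//     state = state.set<RefBindings>(Sym, *T + 1);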
+
+//===----------------------------------------------------------------------===//
+// AutoreleaseBindings - State used to track objects in autorelease pools.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ARCounts;
+typedef llvm::ImmutableMap<SymbolRef, ARCounts> ARPoolContents;
+typedef llvm::ImmutableList<SymbolRef> ARStack;
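+
+// AutoreleasePoolContents maps each pool symbol to a map from tracked
+// symbols to the number of times they have been autoreleased into that
+// pool; AutoreleaseStack is the stack of active pools, with the caller's
+// pool represented by a null SymbolRef (see PrintPool below).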
+
+static int AutoRCIndex = 0;
+static int AutoRBIndex = 0;
+
+namespace { class VISIBILITY_HIDDEN AutoreleasePoolContents {}; }
+namespace { class VISIBILITY_HIDDEN AutoreleaseStack {}; }
+
+namespace clang {
+template<> struct GRStateTrait<AutoreleaseStack>
+ : public GRStatePartialTrait<ARStack> {
+ static inline void* GDMIndex() { return &AutoRBIndex; }
+};
+
+template<> struct GRStateTrait<AutoreleasePoolContents>
+ : public GRStatePartialTrait<ARPoolContents> {
+ static inline void* GDMIndex() { return &AutoRCIndex; }
+};
+} // end clang namespace
+
+static SymbolRef GetCurrentAutoreleasePool(const GRState* state) {
+ ARStack stack = state->get<AutoreleaseStack>();
+ return stack.isEmpty() ? SymbolRef() : stack.getHead();
+}
+
+static GRStateRef SendAutorelease(GRStateRef state, ARCounts::Factory &F,
+ SymbolRef sym) {
+
+ SymbolRef pool = GetCurrentAutoreleasePool(state);
+ const ARCounts *cnts = state.get<AutoreleasePoolContents>(pool);
+ ARCounts newCnts(0);
+
+ if (cnts) {
+ const unsigned *cnt = (*cnts).lookup(sym);
+ newCnts = F.Add(*cnts, sym, cnt ? *cnt + 1 : 1);
+ }
+ else
+ newCnts = F.Add(F.GetEmptyMap(), sym, 1);
+
+ return state.set<AutoreleasePoolContents>(pool, newCnts);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN CFRefCount : public GRSimpleVals {
+public:
+ class BindingsPrinter : public GRState::Printer {
+ public:
+ virtual void Print(std::ostream& Out, const GRState* state,
+ const char* nl, const char* sep);
+ };
+
+private:
+ typedef llvm::DenseMap<const GRExprEngine::NodeTy*, const RetainSummary*>
+ SummaryLogTy;
+
+ RetainSummaryManager Summaries;
+ SummaryLogTy SummaryLog;
+ const LangOptions& LOpts;
+ ARCounts::Factory ARCountFactory;
+
+ BugType *useAfterRelease, *releaseNotOwned;
+ BugType *deallocGC, *deallocNotOwned;
+ BugType *leakWithinFunction, *leakAtReturn;
+ BugType *overAutorelease;
+ BugType *returnNotOwnedForOwned;
+ BugReporter *BR;
+
+ GRStateRef Update(GRStateRef state, SymbolRef sym, RefVal V, ArgEffect E,
+ RefVal::Kind& hasErr);
+
+ void ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
+ GRStmtNodeBuilder<GRState>& Builder,
+ Expr* NodeExpr, Expr* ErrorExpr,
+ ExplodedNode<GRState>* Pred,
+ const GRState* St,
+ RefVal::Kind hasErr, SymbolRef Sym);
+
+ GRStateRef HandleSymbolDeath(GRStateRef state, SymbolRef sid, RefVal V,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked);
+
+ ExplodedNode<GRState>* ProcessLeaks(GRStateRef state,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilder &Builder,
+ GRExprEngine &Eng,
+ ExplodedNode<GRState> *Pred = 0);
+
+public:
+ CFRefCount(ASTContext& Ctx, bool gcenabled, const LangOptions& lopts)
+ : Summaries(Ctx, gcenabled),
+ LOpts(lopts), useAfterRelease(0), releaseNotOwned(0),
+ deallocGC(0), deallocNotOwned(0),
+ leakWithinFunction(0), leakAtReturn(0), overAutorelease(0),
+ returnNotOwnedForOwned(0), BR(0) {}
+
+ virtual ~CFRefCount() {}
+
+ void RegisterChecks(BugReporter &BR);
+
+ virtual void RegisterPrinters(std::vector<GRState::Printer*>& Printers) {
+ Printers.push_back(new BindingsPrinter());
+ }
+
+ bool isGCEnabled() const { return Summaries.isGCEnabled(); }
+ const LangOptions& getLangOptions() const { return LOpts; }
+
+ const RetainSummary *getSummaryOfNode(const ExplodedNode<GRState> *N) const {
+ SummaryLogTy::const_iterator I = SummaryLog.find(N);
+ return I == SummaryLog.end() ? 0 : I->second;
+ }
+
+ // Calls.
+
+ void EvalSummary(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ Expr* Ex,
+ Expr* Receiver,
+ const RetainSummary& Summ,
+ ExprIterator arg_beg, ExprIterator arg_end,
+ ExplodedNode<GRState>* Pred);
+
+ virtual void EvalCall(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred);
+
+
+ virtual void EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode<GRState>* Pred);
+
+ bool EvalObjCMessageExprAux(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode<GRState>* Pred);
+
+ // Stores.
+ virtual void EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val);
+
+ // End-of-path.
+
+ virtual void EvalEndPath(GRExprEngine& Engine,
+ GREndPathNodeBuilder<GRState>& Builder);
+
+ virtual void EvalDeadSymbols(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ExplodedNode<GRState>* Pred,
+ Stmt* S, const GRState* state,
+ SymbolReaper& SymReaper);
+
+ std::pair<ExplodedNode<GRState>*, GRStateRef>
+ HandleAutoreleaseCounts(GRStateRef state, GenericNodeBuilder Bd,
+ ExplodedNode<GRState>* Pred, GRExprEngine &Eng,
+ SymbolRef Sym, RefVal V, bool &stop);
+ // Return statements.
+
+ virtual void EvalReturn(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ReturnStmt* S,
+ ExplodedNode<GRState>* Pred);
+
+ // Assumptions.
+
+ virtual const GRState* EvalAssume(GRStateManager& VMgr,
+ const GRState* St, SVal Cond,
+ bool Assumption, bool& isFeasible);
+};
+
+} // end anonymous namespace
+
+static void PrintPool(std::ostream &Out, SymbolRef Sym, const GRState *state) {
+ Out << ' ';
+ if (Sym)
+ Out << Sym->getSymbolID();
+ else
+ Out << "<pool>";
+ Out << ":{";
+
+ // Get the contents of the pool.
+ if (const ARCounts *cnts = state->get<AutoreleasePoolContents>(Sym))
+ for (ARCounts::iterator J=cnts->begin(), EJ=cnts->end(); J != EJ; ++J)
+ Out << '(' << J.getKey() << ',' << J.getData() << ')';
+
+ Out << '}';
+}
+
+void CFRefCount::BindingsPrinter::Print(std::ostream& Out, const GRState* state,
+ const char* nl, const char* sep) {
+
+ RefBindings B = state->get<RefBindings>();
+
+ if (!B.isEmpty())
+ Out << sep << nl;
+
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ Out << (*I).first << " : ";
+ (*I).second.print(Out);
+ Out << nl;
+ }
+
+ // Print the autorelease stack.
+ Out << sep << nl << "AR pool stack:";
+ ARStack stack = state->get<AutoreleaseStack>();
+
+ PrintPool(Out, SymbolRef(), state); // Print the caller's pool.
+ for (ARStack::iterator I=stack.begin(), E=stack.end(); I!=E; ++I)
+ PrintPool(Out, *I, state);
+
+ Out << nl;
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+ //===-------------===//
+ // Bug Descriptions. //
+ //===-------------===//
+
+ class VISIBILITY_HIDDEN CFRefBug : public BugType {
+ protected:
+ CFRefCount& TF;
+
+ CFRefBug(CFRefCount* tf, const char* name)
+ : BugType(name, "Memory (Core Foundation/Objective-C)"), TF(*tf) {}
+ public:
+
+ CFRefCount& getTF() { return TF; }
+ const CFRefCount& getTF() const { return TF; }
+
+ // FIXME: Eventually remove.
+ virtual const char* getDescription() const = 0;
+
+ virtual bool isLeak() const { return false; }
+ };
+
+ class VISIBILITY_HIDDEN UseAfterRelease : public CFRefBug {
+ public:
+ UseAfterRelease(CFRefCount* tf)
+ : CFRefBug(tf, "Use-after-release") {}
+
+ const char* getDescription() const {
+ return "Reference-counted object is used after it is released";
+ }
+ };
+
+ class VISIBILITY_HIDDEN BadRelease : public CFRefBug {
+ public:
+ BadRelease(CFRefCount* tf) : CFRefBug(tf, "Bad release") {}
+
+ const char* getDescription() const {
+ return "Incorrect decrement of the reference count of an "
+ "object is not owned at this point by the caller";
+ }
+ };
+
+ class VISIBILITY_HIDDEN DeallocGC : public CFRefBug {
+ public:
+ DeallocGC(CFRefCount *tf)
+ : CFRefBug(tf, "-dealloc called while using garbage collection") {}
+
+ const char *getDescription() const {
+ return "-dealloc called while using garbage collection";
+ }
+ };
+
+ class VISIBILITY_HIDDEN DeallocNotOwned : public CFRefBug {
+ public:
+ DeallocNotOwned(CFRefCount *tf)
+ : CFRefBug(tf, "-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+ };
+
+ class VISIBILITY_HIDDEN OverAutorelease : public CFRefBug {
+ public:
+ OverAutorelease(CFRefCount *tf) :
+ CFRefBug(tf, "Object sent -autorelease too many times") {}
+
+ const char *getDescription() const {
+ return "Object sent -autorelease too many times";
+ }
+ };
+
+ class VISIBILITY_HIDDEN ReturnedNotOwnedForOwned : public CFRefBug {
+ public:
+ ReturnedNotOwnedForOwned(CFRefCount *tf) :
+ CFRefBug(tf, "Method should return an owned object") {}
+
+ const char *getDescription() const {
+ return "Object with +0 retain counts returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+ };
+
+ class VISIBILITY_HIDDEN Leak : public CFRefBug {
+ const bool isReturn;
+ protected:
+ Leak(CFRefCount* tf, const char* name, bool isRet)
+ : CFRefBug(tf, name), isReturn(isRet) {}
+ public:
+
+ const char* getDescription() const { return ""; }
+
+ bool isLeak() const { return true; }
+ };
+
+ class VISIBILITY_HIDDEN LeakAtReturn : public Leak {
+ public:
+ LeakAtReturn(CFRefCount* tf, const char* name)
+ : Leak(tf, name, true) {}
+ };
+
+ class VISIBILITY_HIDDEN LeakWithinFunction : public Leak {
+ public:
+ LeakWithinFunction(CFRefCount* tf, const char* name)
+ : Leak(tf, name, false) {}
+ };
+
+ //===---------===//
+ // Bug Reports. //
+ //===---------===//
+
+ class VISIBILITY_HIDDEN CFRefReport : public RangedBugReport {
+ protected:
+ SymbolRef Sym;
+ const CFRefCount &TF;
+ public:
+ CFRefReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode<GRState> *n, SymbolRef sym)
+ : RangedBugReport(D, D.getDescription(), n), Sym(sym), TF(tf) {}
+
+ CFRefReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode<GRState> *n, SymbolRef sym, const char* endText)
+ : RangedBugReport(D, D.getDescription(), endText, n), Sym(sym), TF(tf) {}
+
+ virtual ~CFRefReport() {}
+
+ CFRefBug& getBugType() {
+ return (CFRefBug&) RangedBugReport::getBugType();
+ }
+ const CFRefBug& getBugType() const {
+ return (const CFRefBug&) RangedBugReport::getBugType();
+ }
+
+ virtual void getRanges(BugReporter& BR, const SourceRange*& beg,
+ const SourceRange*& end) {
+
+ if (!getBugType().isLeak())
+ RangedBugReport::getRanges(BR, beg, end);
+ else
+ beg = end = 0;
+ }
+
+ SymbolRef getSymbol() const { return Sym; }
+
+ PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N);
+
+ std::pair<const char**,const char**> getExtraDescriptiveText();
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState>* N,
+ const ExplodedNode<GRState>* PrevN,
+ BugReporterContext& BRC);
+ };
+
+ class VISIBILITY_HIDDEN CFRefLeakReport : public CFRefReport {
+ SourceLocation AllocSite;
+ const MemRegion* AllocBinding;
+ public:
+ CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode<GRState> *n, SymbolRef sym,
+ GRExprEngine& Eng);
+
+ PathDiagnosticPiece* getEndPath(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N);
+
+ SourceLocation getLocation() const { return AllocSite; }
+ };
+} // end anonymous namespace
+
+void CFRefCount::RegisterChecks(BugReporter& BR) {
+ useAfterRelease = new UseAfterRelease(this);
+ BR.Register(useAfterRelease);
+
+ releaseNotOwned = new BadRelease(this);
+ BR.Register(releaseNotOwned);
+
+ deallocGC = new DeallocGC(this);
+ BR.Register(deallocGC);
+
+ deallocNotOwned = new DeallocNotOwned(this);
+ BR.Register(deallocNotOwned);
+
+ overAutorelease = new OverAutorelease(this);
+ BR.Register(overAutorelease);
+
+ returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
+ BR.Register(returnNotOwnedForOwned);
+
+ // First register "return" leaks.
+ const char* name = 0;
+
+ if (isGCEnabled())
+ name = "Leak of returned object when using garbage collection";
+ else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
+ name = "Leak of returned object when not using garbage collection (GC) in "
+ "dual GC/non-GC code";
+ else {
+ assert(getLangOptions().getGCMode() == LangOptions::NonGC);
+ name = "Leak of returned object";
+ }
+
+ leakAtReturn = new LeakAtReturn(this, name);
+ BR.Register(leakAtReturn);
+
+ // Second, register leaks within a function/method.
+ if (isGCEnabled())
+ name = "Leak of object when using garbage collection";
+ else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
+ name = "Leak of object when not using garbage collection (GC) in "
+ "dual GC/non-GC code";
+ else {
+ assert(getLangOptions().getGCMode() == LangOptions::NonGC);
+ name = "Leak";
+ }
+
+ leakWithinFunction = new LeakWithinFunction(this, name);
+ BR.Register(leakWithinFunction);
+
+ // Save the reference to the BugReporter.
+ this->BR = &BR;
+}
+
+static const char* Msgs[] = {
+ // GC only
+ "Code is compiled to only use garbage collection",
+ // No GC.
+ "Code is compiled to use reference counts",
+ // Hybrid, with GC.
+ "Code is compiled to use either garbage collection (GC) or reference counts"
+ " (non-GC). The bug occurs with GC enabled",
+ // Hybrid, without GC
+ "Code is compiled to use either garbage collection (GC) or reference counts"
+ " (non-GC). The bug occurs in non-GC mode"
+};
+
+std::pair<const char**,const char**> CFRefReport::getExtraDescriptiveText() {
+ CFRefCount& TF = static_cast<CFRefBug&>(getBugType()).getTF();
+
+ switch (TF.getLangOptions().getGCMode()) {
+ default:
+ assert(false);
+
+ case LangOptions::GCOnly:
+ assert (TF.isGCEnabled());
+ return std::make_pair(&Msgs[0], &Msgs[0]+1);
+
+ case LangOptions::NonGC:
+ assert (!TF.isGCEnabled());
+ return std::make_pair(&Msgs[1], &Msgs[1]+1);
+
+ case LangOptions::HybridGC:
+ if (TF.isGCEnabled())
+ return std::make_pair(&Msgs[2], &Msgs[2]+1);
+ else
+ return std::make_pair(&Msgs[3], &Msgs[3]+1);
+ }
+}
+
+static inline bool contains(const llvm::SmallVectorImpl<ArgEffect>& V,
+ ArgEffect X) {
+ for (llvm::SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
+ I!=E; ++I)
+ if (*I == X) return true;
+
+ return false;
+}
+
+PathDiagnosticPiece* CFRefReport::VisitNode(const ExplodedNode<GRState>* N,
+ const ExplodedNode<GRState>* PrevN,
+ BugReporterContext& BRC) {
+
+ if (!isa<PostStmt>(N->getLocation()))
+ return NULL;
+
+ // Check if the type state has changed.
+ GRStateManager &StMgr = BRC.getStateManager();
+ GRStateRef PrevSt(PrevN->getState(), StMgr);
+ GRStateRef CurrSt(N->getState(), StMgr);
+
+ const RefVal* CurrT = CurrSt.get<RefBindings>(Sym);
+ if (!CurrT) return NULL;
+
+ const RefVal& CurrV = *CurrT;
+ const RefVal* PrevT = PrevSt.get<RefBindings>(Sym);
+
+ // Create a string buffer to contain all the useful things we want
+ // to tell the user.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ // This is the allocation site since the previous node had no bindings
+ // for this symbol.
+ if (!PrevT) {
+ Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available).
+ SVal X = CurrSt.GetSValAsScalarOrLoc(CE->getCallee());
+ if (const FunctionDecl* FD = X.getAsFunctionDecl())
+ os << "Call to function '" << FD->getNameAsString() <<'\'';
+ else
+ os << "function call";
+ }
+ else {
+ assert (isa<ObjCMessageExpr>(S));
+ os << "Method";
+ }
+
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object with a ";
+ }
+ else {
+ assert (CurrV.getObjKind() == RetEffect::ObjC);
+ os << " returns an Objective-C object with a ";
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count (owning reference).";
+
+ if (static_cast<CFRefBug&>(getBugType()).getTF().isGCEnabled()) {
+ assert(CurrV.getObjKind() == RetEffect::CF);
+ os << " "
+ "Core Foundation objects are not automatically garbage collected.";
+ }
+ }
+ else {
+ assert (CurrV.isNotOwned());
+ os << "+0 retain count (non-owning reference).";
+ }
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(Pos, os.str());
+ }
+
+ // Gather up the effects that were performed on the object at this
+ // program point.
+ llvm::SmallVector<ArgEffect, 2> AEffects;
+
+ if (const RetainSummary *Summ =
+ TF.getSummaryOfNode(BRC.getNodeResolver().getOriginalNode(N))) {
+ // We only have summaries attached to nodes after evaluating CallExpr and
+ // ObjCMessageExprs.
+ Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Iterate through the parameter expressions and see if the symbol
+ // was ever passed as an argument.
+ unsigned i = 0;
+
+ for (CallExpr::arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+ AI!=AE; ++AI, ++i) {
+
+ // Retrieve the value of the argument. Is it the symbol
+ // we are interested in?
+ if (CurrSt.GetSValAsScalarOrLoc(*AI).getAsLocSymbol() != Sym)
+ continue;
+
+ // We have an argument. Get the effect!
+ AEffects.push_back(Summ->getArg(i));
+ }
+ }
+ else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (Expr *receiver = ME->getReceiver())
+ if (CurrSt.GetSValAsScalarOrLoc(receiver).getAsLocSymbol() == Sym) {
+ // The symbol we are tracking is the receiver.
+ AEffects.push_back(Summ->getReceiverEffect());
+ }
+ }
+ }
+
+ do {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (!TF.isGCEnabled() && contains(AEffects, Dealloc)) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ break;
+ }
+ }
+
+ // Specially handle CFMakeCollectable and friends.
+ if (contains(AEffects, MakeCollectable)) {
+ // Get the name of the function.
+ Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ SVal X = CurrSt.GetSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee());
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+ const std::string& FName = FD->getNameAsString();
+
+ if (TF.isGCEnabled()) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+
+ os << "In GC mode a call to '" << FName
+ << "' decrements an object's retain count and registers the "
+ "object with the garbage collector. ";
+
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCount() == 0);
+ os << "Since it now has a 0 retain count the object can be "
+ "automatically collected by the garbage collector.";
+ }
+ else
+ os << "An object must have a 0 retain count to be garbage collected. "
+ "After this call its retain count is +" << CurrV.getCount()
+ << '.';
+ }
+ else
+ os << "When GC is not enabled a call to '" << FName
+ << "' has no effect on its argument.";
+
+ // Nothing more to say.
+ break;
+ }
+
+ // Determine if the typestate has changed.
+ if (!(PrevV == CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return 0;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object sent -autorelease message";
+ break;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ if (PrevV.getKind() == RefVal::Released) {
+ assert(TF.isGCEnabled() && CurrV.getCount() > 0);
+ os << " The object is not eligible for garbage collection until the "
+ "retain count reaches 0 again.";
+ }
+
+ break;
+
+ case RefVal::Released:
+ os << "Object released.";
+ break;
+
+ case RefVal::ReturnedOwned:
+ os << "Object returned to caller as an owning reference (single retain "
+ "count transferred to caller).";
+ break;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 (non-owning) retain count.";
+ break;
+
+ default:
+ return NULL;
+ }
+
+ // Emit any remaining diagnostics for the argument effects (if any).
+ for (llvm::SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
+ E=AEffects.end(); I != E; ++I) {
+
+ // A bunch of things have alternate behavior under GC.
+ if (TF.isGCEnabled())
+ switch (*I) {
+ default: break;
+ case Autorelease:
+ os << "In GC mode an 'autorelease' has no effect.";
+ continue;
+ case IncRefMsg:
+ os << "In GC mode the 'retain' message has no effect.";
+ continue;
+ case DecRefMsg:
+ os << "In GC mode the 'release' message has no effect.";
+ continue;
+ }
+ }
+ } while(0);
+
+ if (os.str().empty())
+ return 0; // We have nothing to say!
+
+ Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager());
+ PathDiagnosticPiece* P = new PathDiagnosticEventPiece(Pos, os.str());
+
+ // Add the range by scanning the children of the statement for any bindings
+ // to Sym.
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Expr* Exp = dyn_cast_or_null<Expr>(*I))
+ if (CurrSt.GetSValAsScalarOrLoc(Exp).getAsLocSymbol() == Sym) {
+ P->addRange(Exp->getSourceRange());
+ break;
+ }
+
+ return P;
+}
+
+namespace {
+ class VISIBILITY_HIDDEN FindUniqueBinding :
+ public StoreManager::BindingsHandler {
+ SymbolRef Sym;
+ const MemRegion* Binding;
+ bool First;
+
+ public:
+ FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal val) {
+
+ SymbolRef SymV = val.getAsSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+ if (Binding) {
+ First = false;
+ return false;
+ }
+ else
+ Binding = R;
+
+ return true;
+ }
+
+ operator bool() { return First && Binding; }
+ const MemRegion* getRegion() { return Binding; }
+ };
+}
+
+static std::pair<const ExplodedNode<GRState>*,const MemRegion*>
+GetAllocationSite(GRStateManager& StateMgr, const ExplodedNode<GRState>* N,
+ SymbolRef Sym) {
+
+ // Find both the first node that referred to the tracked symbol and the
+ // memory location that value was stored to.
+ const ExplodedNode<GRState>* Last = N;
+ const MemRegion* FirstBinding = 0;
+
+ while (N) {
+ const GRState* St = N->getState();
+ RefBindings B = St->get<RefBindings>();
+
+ if (!B.lookup(Sym))
+ break;
+
+ FindUniqueBinding FB(Sym);
+ StateMgr.iterBindings(St, FB);
+ if (FB) FirstBinding = FB.getRegion();
+
+ Last = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ return std::make_pair(Last, FirstBinding);
+}
+
+PathDiagnosticPiece*
+CFRefReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* EndN) {
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BRC.addNotableSymbol(Sym);
+ return RangedBugReport::getEndPath(BRC, EndN);
+}
+
+PathDiagnosticPiece*
+CFRefLeakReport::getEndPath(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* EndN){
+
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BRC.addNotableSymbol(Sym);
+
+ // We are reporting a leak. Walk up the graph to get to the first node where
+ // the symbol appeared, and also get the first VarDecl the tracked object
+ // is stored to.
+ const ExplodedNode<GRState>* AllocNode = 0;
+ const MemRegion* FirstBinding = 0;
+
+ llvm::tie(AllocNode, FirstBinding) =
+ GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+ // Get the allocation site.
+ assert(AllocNode);
+ Stmt* FirstStmt = cast<PostStmt>(AllocNode->getLocation()).getStmt();
+
+ SourceManager& SMgr = BRC.getSourceManager();
+ unsigned AllocLine =SMgr.getInstantiationLineNumber(FirstStmt->getLocStart());
+
+ // Compute an actual location for the leak. Sometimes a leak doesn't
+ // occur at an actual statement (e.g., transition between blocks; end
+ // of function) so we need to walk the graph and compute a real location.
+ const ExplodedNode<GRState>* LeakN = EndN;
+ PathDiagnosticLocation L;
+
+ while (LeakN) {
+ ProgramPoint P = LeakN->getLocation();
+
+ if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ L = PathDiagnosticLocation(PS->getStmt()->getLocStart(), SMgr);
+ break;
+ }
+ else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ if (const Stmt* Term = BE->getSrc()->getTerminator()) {
+ L = PathDiagnosticLocation(Term->getLocStart(), SMgr);
+ break;
+ }
+ }
+
+ LeakN = LeakN->succ_empty() ? 0 : *(LeakN->succ_begin());
+ }
+
+ if (!L.isValid()) {
+ const Decl &D = BRC.getCodeDecl();
+ L = PathDiagnosticLocation(D.getBodyRBrace(BRC.getASTContext()), SMgr);
+ }
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Object allocated on line " << AllocLine;
+
+ if (FirstBinding)
+ os << " and stored into '" << FirstBinding->getString() << '\'';
+
+ // Get the retain count.
+ const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
+
+ if (RV->getKind() == RefVal::ErrorLeakReturned) {
+ // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+ // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
+ // to the caller for NS objects.
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(BRC.getCodeDecl());
+ os << " is returned from a method whose name ('"
+ << MD.getSelector().getAsString()
+ << "') does not contain 'copy' or otherwise starts with"
+ " 'new' or 'alloc'. This violates the naming convention rules given"
+ " in the Memory Management Guide for Cocoa (object leaked)";
+ }
+ else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+ ObjCMethodDecl& MD = cast<ObjCMethodDecl>(BRC.getCodeDecl());
+ os << " and returned from method '" << MD.getSelector().getAsString()
+ << "' is potentially leaked when using garbage collection. Callers "
+ "of this method do not expect a returned object with a +1 retain "
+ "count since they expect the object to be managed by the garbage "
+ "collector";
+ }
+ else
+ os << " is no longer referenced after this point and has a retain count of"
+ " +" << RV->getCount() << " (object leaked)";
+
+ return new PathDiagnosticEventPiece(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug& D, const CFRefCount &tf,
+ ExplodedNode<GRState> *n,
+ SymbolRef sym, GRExprEngine& Eng)
+: CFRefReport(D, tf, n, sym)
+{
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path. To do this, we need to find
+ // the allocation site of a piece of tracked memory, which we do via a
+ // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
+ // Note that this is *not* the trimmed graph; we are guaranteed, however,
+ // that all ancestor nodes that represent the allocation site have the
+ // same SourceLocation.
+ const ExplodedNode<GRState>* AllocNode = 0;
+
+ llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
+ GetAllocationSite(Eng.getStateManager(), getEndNode(), getSymbol());
+
+ // Get the SourceLocation for the allocation site.
+ ProgramPoint P = AllocNode->getLocation();
+ AllocSite = cast<PostStmt>(P).getStmt()->getLocStart();
+
+ // Fill in the description of the bug.
+ Description.clear();
+ llvm::raw_string_ostream os(Description);
+ SourceManager& SMgr = Eng.getContext().getSourceManager();
+ unsigned AllocLine = SMgr.getInstantiationLineNumber(AllocSite);
+ os << "Potential leak ";
+ if (tf.isGCEnabled()) {
+ os << "(when using garbage collection) ";
+ }
+ os << "of an object allocated on line " << AllocLine;
+
+ // FIXME: AllocBinding doesn't get populated for RegionStore yet.
+ if (AllocBinding)
+ os << " and stored into '" << AllocBinding->getString() << '\'';
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetEx, when
+/// invoking class methods we augment the return type to be that of
+/// a pointer to the class (as opposed to it just being id).
+static QualType GetReturnType(Expr* RetE, ASTContext& Ctx) {
+
+ QualType RetTy = RetE->getType();
+
+ // FIXME: We aren't handling id<...>.
+ const PointerType* PT = RetTy->getAsPointerType();
+ if (!PT)
+ return RetTy;
+
+ // If RetEx is not a message expression just return its type.
+ // If RetEx is a message expression, return its type if it is something
+ // more specific than id.
+
+ ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(RetE);
+
+ if (!ME || !Ctx.isObjCIdStructType(PT->getPointeeType()))
+ return RetTy;
+
+ ObjCInterfaceDecl* D = ME->getClassInfo().first;
+
+ // At this point we know the return type of the message expression is id.
+ // If we have an ObjCInterfaceDecl, we know this is a call to a class method
+ // whose type we can resolve. In such cases, promote the return type to a
+ // pointer to that interface.
+ return !D ? RetTy : Ctx.getPointerType(Ctx.getObjCInterfaceType(D));
+}
+
+
+void CFRefCount::EvalSummary(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ Expr* Ex,
+ Expr* Receiver,
+ const RetainSummary& Summ,
+ ExprIterator arg_beg, ExprIterator arg_end,
+ ExplodedNode<GRState>* Pred) {
+
+ // Get the state.
+ GRStateManager& StateMgr = Eng.getStateManager();
+ GRStateRef state(Builder.GetState(Pred), StateMgr);
+ ASTContext& Ctx = StateMgr.getContext();
+ ValueManager &ValMgr = Eng.getValueManager();
+
+ // Evaluate the effect of the arguments.
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ unsigned idx = 0;
+ Expr* ErrorExpr = NULL;
+ SymbolRef ErrorSym = 0;
+
+ for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
+ SVal V = state.GetSValAsScalarOrLoc(*I);
+ SymbolRef Sym = V.getAsLocSymbol();
+
+ if (Sym)
+ if (RefBindings::data_type* T = state.get<RefBindings>(Sym)) {
+ state = Update(state, Sym, *T, Summ.getArg(idx), hasErr);
+ if (hasErr) {
+ ErrorExpr = *I;
+ ErrorSym = Sym;
+ break;
+ }
+ continue;
+ }
+
+ if (isa<Loc>(V)) {
+ if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(&V)) {
+ if (Summ.getArg(idx) == DoNothingByRef)
+ continue;
+
+ // Invalidate the value of the variable passed by reference.
+
+ // FIXME: Either this logic should also be replicated in GRSimpleVals
+ // or should be pulled into a separate "constraint engine."
+
+ // FIXME: We can have collisions on the conjured symbol if the
+ // expression *I also creates conjured symbols. We probably want
+ // to identify conjured symbols by an expression pair: the enclosing
+ // expression (the context) and the expression itself. This should
+ // disambiguate conjured symbols.
+
+ const TypedRegion* R = dyn_cast<TypedRegion>(MR->getRegion());
+
+ if (R) {
+ // Are we dealing with an ElementRegion? If the element type is
+ // a basic integer type (e.g., char, int) and the underlying region
+ // is a variable region then strip off the ElementRegion.
+ // FIXME: We really need to think about this for the general case
+ // as sometimes we are reasoning about arrays and other times
+ // about (char*), etc., which is just a form of passing raw bytes.
+ // e.g., void *p = alloca(); foo((char*)p);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Checking for 'integral type' is probably too promiscuous, but
+ // we'll leave it in for now until we have a systematic way of
+ // handling all of these cases. Eventually we need to come up
+ // with an interface to StoreManager so that this logic can be
+ // appropriately delegated to the respective StoreManagers while
+ // still allowing us to do checker-specific logic (e.g.,
+ // invalidating reference counts), probably via callbacks.
+ if (ER->getElementType()->isIntegralType()) {
+ const MemRegion *superReg = ER->getSuperRegion();
+ if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+ isa<ObjCIvarRegion>(superReg))
+ R = cast<TypedRegion>(superReg);
+ }
+
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Is the invalidated variable something that we were tracking?
+ SymbolRef Sym = state.GetSValAsScalarOrLoc(R).getAsLocSymbol();
+
+ // Remove any existing reference-count binding.
+ if (Sym) state = state.remove<RefBindings>(Sym);
+
+ if (R->isBoundable(Ctx)) {
+ // Set the value of the variable to be a conjured symbol.
+ unsigned Count = Builder.getCurrentBlockCount();
+ QualType T = R->getValueType(Ctx);
+
+ if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())){
+ ValueManager &ValMgr = Eng.getValueManager();
+ SVal V = ValMgr.getConjuredSymbolVal(*I, T, Count);
+ state = state.BindLoc(Loc::MakeVal(R), V);
+ }
+ else if (const RecordType *RT = T->getAsStructureType()) {
+ // Handle structs in a not so awesome way. Here we just
+ // eagerly bind new symbols to the fields. In reality we
+ // should have the store manager handle this. The idea is just
+ // to prototype some basic functionality here. All of this logic
+ // should one day soon just go away.
+ const RecordDecl *RD = RT->getDecl()->getDefinition(Ctx);
+
+ // No record definition. There is nothing we can do.
+ if (!RD)
+ continue;
+
+ MemRegionManager &MRMgr = state.getManager().getRegionManager();
+
+ // Iterate through the fields and construct new symbols.
+ for (RecordDecl::field_iterator FI=RD->field_begin(Ctx),
+ FE=RD->field_end(Ctx); FI!=FE; ++FI) {
+
+ // For now just handle scalar fields.
+ FieldDecl *FD = *FI;
+ QualType FT = FD->getType();
+
+ if (Loc::IsLocType(FT) ||
+ (FT->isIntegerType() && FT->isScalarType())) {
+ const FieldRegion* FR = MRMgr.getFieldRegion(FD, R);
+
+ SVal V = ValMgr.getConjuredSymbolVal(*I, FT, Count);
+ state = state.BindLoc(Loc::MakeVal(FR), V);
+ }
+ }
+ } else if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+ // Set the default value of the array to a conjured symbol.
+ StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
+ SVal V = ValMgr.getConjuredSymbolVal(*I, AT->getElementType(),
+ Count);
+ state = GRStateRef(StoreMgr.setDefaultValue(state, R, V),
+ StateMgr);
+ } else {
+ // Just blast away other values.
+ state = state.BindLoc(*MR, UnknownVal());
+ }
+ }
+ }
+ else
+ state = state.BindLoc(*MR, UnknownVal());
+ }
+ else {
+ // Nuke all other arguments passed by reference.
+ state = state.Unbind(cast<Loc>(V));
+ }
+ }
+ else if (isa<nonloc::LocAsInteger>(V))
+ state = state.Unbind(cast<nonloc::LocAsInteger>(V).getLoc());
+ }
+
+ // Evaluate the effect on the message receiver.
+ if (!ErrorExpr && Receiver) {
+ SymbolRef Sym = state.GetSValAsScalarOrLoc(Receiver).getAsLocSymbol();
+ if (Sym) {
+ if (const RefVal* T = state.get<RefBindings>(Sym)) {
+ state = Update(state, Sym, *T, Summ.getReceiverEffect(), hasErr);
+ if (hasErr) {
+ ErrorExpr = Receiver;
+ ErrorSym = Sym;
+ }
+ }
+ }
+ }
+
+ // Process any errors.
+ if (hasErr) {
+ ProcessNonLeakError(Dst, Builder, Ex, ErrorExpr, Pred, state,
+ hasErr, ErrorSym);
+ return;
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+ assert(Receiver);
+ SVal V = state.GetSValAsScalarOrLoc(Receiver);
+ bool found = false;
+ if (SymbolRef Sym = V.getAsLocSymbol())
+ if (state.get<RefBindings>(Sym)) {
+ found = true;
+ RE = Summaries.getObjAllocRetEffect();
+ }
+
+ if (!found)
+ RE = RetEffect::MakeNoRet();
+ }
+
+ switch (RE.getKind()) {
+ default:
+ assert (false && "Unhandled RetEffect."); break;
+
+ case RetEffect::NoRet: {
+
+ // Make up a symbol for the return value (not reference counted).
+ // FIXME: This is basically copy-and-paste from GRSimpleVals. We
+ // should compose behavior, not copy it.
+
+ // FIXME: We eventually should handle structs and other compound types
+ // that are returned by value.
+
+ QualType T = Ex->getType();
+
+ if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SVal X = ValMgr.getConjuredSymbolVal(Ex, T, Count);
+ state = state.BindExpr(Ex, X, false);
+ }
+
+ break;
+ }
+
+ case RetEffect::Alias: {
+ unsigned idx = RE.getIndex();
+ assert (arg_end >= arg_beg);
+ assert (idx < (unsigned) (arg_end - arg_beg));
+ SVal V = state.GetSValAsScalarOrLoc(*(arg_beg+idx));
+ state = state.BindExpr(Ex, V, false);
+ break;
+ }
+
+ case RetEffect::ReceiverAlias: {
+ assert (Receiver);
+ SVal V = state.GetSValAsScalarOrLoc(Receiver);
+ state = state.BindExpr(Ex, V, false);
+ break;
+ }
+
+ case RetEffect::OwnedAllocatedSymbol:
+ case RetEffect::OwnedSymbol: {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ state = state.set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
+ RetT));
+ state = state.BindExpr(Ex, ValMgr.makeRegionVal(Sym), false);
+
+    // FIXME: Add a flag to the checker where allocations are assumed to
+    // *not* fail.
+#if 0
+ if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
+ bool isFeasible;
+ state = state.Assume(loc::SymbolVal(Sym), true, isFeasible);
+ assert(isFeasible && "Cannot assume fresh symbol is non-null.");
+ }
+#endif
+
+ break;
+ }
+
+ case RetEffect::GCNotOwnedSymbol:
+ case RetEffect::NotOwnedSymbol: {
+ unsigned Count = Builder.getCurrentBlockCount();
+ ValueManager &ValMgr = Eng.getValueManager();
+ SymbolRef Sym = ValMgr.getConjuredSymbol(Ex, Count);
+ QualType RetT = GetReturnType(Ex, ValMgr.getContext());
+ state = state.set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
+ RetT));
+ state = state.BindExpr(Ex, ValMgr.makeRegionVal(Sym), false);
+ break;
+ }
+ }
+
+ // Generate a sink node if we are at the end of a path.
+ GRExprEngine::NodeTy *NewNode =
+ Summ.isEndPath() ? Builder.MakeSinkNode(Dst, Ex, Pred, state)
+ : Builder.MakeNode(Dst, Ex, Pred, state);
+
+ // Annotate the edge with summary we used.
+ if (NewNode) SummaryLog[NewNode] = &Summ;
+}
+
+
+void CFRefCount::EvalCall(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred) {
+ const FunctionDecl* FD = L.getAsFunctionDecl();
+ RetainSummary* Summ = !FD ? Summaries.getDefaultSummary()
+ : Summaries.getSummary(const_cast<FunctionDecl*>(FD));
+
+ assert(Summ);
+ EvalSummary(Dst, Eng, Builder, CE, 0, *Summ,
+ CE->arg_begin(), CE->arg_end(), Pred);
+}
+
+void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode<GRState>* Pred) {
+ RetainSummary* Summ = 0;
+
+ if (Expr* Receiver = ME->getReceiver()) {
+    // We need the type information of the tracked receiver object.
+    // Retrieve it from the state.
+ const ObjCInterfaceDecl* ID = 0;
+
+ // FIXME: Wouldn't it be great if this code could be reduced? It's just
+ // a chain of lookups.
+ // FIXME: Is this really working as expected? There are cases where
+ // we just use the 'ID' from the message expression.
+ const GRState* St = Builder.GetState(Pred);
+ SVal V = Eng.getStateManager().GetSValAsScalarOrLoc(St, Receiver);
+
+ SymbolRef Sym = V.getAsLocSymbol();
+ if (Sym) {
+ if (const RefVal* T = St->get<RefBindings>(Sym)) {
+ QualType Ty = T->getType();
+
+ if (const PointerType* PT = Ty->getAsPointerType()) {
+ QualType PointeeTy = PT->getPointeeType();
+
+ if (ObjCInterfaceType* IT = dyn_cast<ObjCInterfaceType>(PointeeTy))
+ ID = IT->getDecl();
+ }
+ }
+ }
+
+ // FIXME: this is a hack. This may or may not be the actual method
+ // that is called.
+ if (!ID) {
+ if (const PointerType *PT = Receiver->getType()->getAsPointerType())
+ if (const ObjCInterfaceType *p =
+ PT->getPointeeType()->getAsObjCInterfaceType())
+ ID = p->getDecl();
+ }
+
+ // FIXME: The receiver could be a reference to a class, meaning that
+ // we should use the class method.
+ Summ = Summaries.getInstanceMethodSummary(ME, ID);
+
+    // Special-case: are we sending a message to "self"?
+ // This is a hack. When we have full-IP this should be removed.
+ if (isa<ObjCMethodDecl>(&Eng.getGraph().getCodeDecl())) {
+ if (Expr* Receiver = ME->getReceiver()) {
+ SVal X = Eng.getStateManager().GetSValAsScalarOrLoc(St, Receiver);
+ if (loc::MemRegionVal* L = dyn_cast<loc::MemRegionVal>(&X))
+ if (L->getRegion() == Eng.getStateManager().getSelfRegion(St)) {
+ // Update the summary to make the default argument effect
+ // 'StopTracking'.
+ Summ = Summaries.copySummary(Summ);
+ Summ->setDefaultArgEffect(StopTracking);
+ }
+ }
+ }
+ }
+ else
+ Summ = Summaries.getClassMethodSummary(ME);
+
+ if (!Summ)
+ Summ = Summaries.getDefaultSummary();
+
+ EvalSummary(Dst, Eng, Builder, ME, ME->getReceiver(), *Summ,
+ ME->arg_begin(), ME->arg_end(), Pred);
+}
+
+namespace {
+class VISIBILITY_HIDDEN StopTrackingCallback : public SymbolVisitor {
+ GRStateRef state;
+public:
+ StopTrackingCallback(GRStateRef st) : state(st) {}
+ GRStateRef getState() { return state; }
+
+ bool VisitSymbol(SymbolRef sym) {
+ state = state.remove<RefBindings>(sym);
+ return true;
+ }
+
+ const GRState* getState() const { return state.getState(); }
+};
+} // end anonymous namespace
+
+
+void CFRefCount::EvalBind(GRStmtNodeBuilderRef& B, SVal location, SVal val) {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = false;
+
+ // A value escapes in three possible cases (this may change):
+ //
+ // (1) we are binding to something that is not a memory region.
+ // (2) we are binding to a memregion that does not have stack storage
+ // (3) we are binding to a memregion with stack storage that the store
+ // does not understand.
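+  //  For example, storing a tracked pointer into a global variable falls
+  //  under (2): other code can now release the object, so we must stop
+  //  tracking the symbols reachable from the stored value.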
+ GRStateRef state = B.getState();
+
+ if (!isa<loc::MemRegionVal>(location))
+ escapes = true;
+ else {
+ const MemRegion* R = cast<loc::MemRegionVal>(location).getRegion();
+ escapes = !B.getStateManager().hasStackStorage(R);
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding removed. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ escapes = (state == (state.BindLoc(cast<Loc>(location), UnknownVal())));
+ }
+ }
+
+  // If the store can represent the binding and we are storing to something
+  // that has local storage, just return and let the simulation state continue
+  // as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ B.MakeNode(state.scanReachableSymbols<StopTrackingCallback>(val).getState());
+}
+
+
+ // Return statements.
+
+void CFRefCount::EvalReturn(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ReturnStmt* S,
+ ExplodedNode<GRState>* Pred) {
+
+ Expr* RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ GRStateRef state(Builder.GetState(Pred), Eng.getStateManager());
+ SymbolRef Sym = state.GetSValAsScalarOrLoc(RetE).getAsLocSymbol();
+
+ if (!Sym)
+ return;
+
+ // Get the reference count binding (if any).
+ const RefVal* T = state.get<RefBindings>(Sym);
+
+ if (!T)
+ return;
+
+ // Change the reference count.
+ RefVal X = *T;
+
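+  // Returning the object transfers any outstanding retain to the caller:
+  // decrement the count and mark the binding ReturnedOwned (or
+  // ReturnedNotOwned) so the return-site checks below can flag leaks or
+  // ownership mismatches.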
+ switch (X.getKind()) {
+ case RefVal::Owned: {
+ unsigned cnt = X.getCount();
+ assert (cnt > 0);
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ break;
+ }
+
+ case RefVal::NotOwned: {
+ unsigned cnt = X.getCount();
+ if (cnt) {
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ }
+ else {
+ X = X ^ RefVal::ReturnedNotOwned;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ // Update the binding.
+ state = state.set<RefBindings>(Sym, X);
+ Pred = Builder.MakeNode(Dst, S, Pred, state);
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Update the autorelease counts.
+ static unsigned autoreleasetag = 0;
+ GenericNodeBuilder Bd(Builder, S, &autoreleasetag);
+ bool stop = false;
+  llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng, Sym,
+ X, stop);
+
+ // Did we cache out?
+ if (!Pred || stop)
+ return;
+
+ // Get the updated binding.
+ T = state.get<RefBindings>(Sym);
+ assert(T);
+ X = *T;
+
+ // Any leaks or other errors?
+ if (X.isReturnedOwned() && X.getCount() == 0) {
+ const Decl *CD = &Eng.getStateManager().getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+ RetEffect RE = Summ.getRetEffect();
+ bool hasError = false;
+
+ if (RE.getKind() != RetEffect::NoRet) {
+ if (isGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+ // Things are more complicated with garbage collection. If the
+          // returned object is supposed to be an Objective-C object, we have
+ // a leak (as the caller expects a GC'ed object) because no
+ // method should return ownership unless it returns a CF object.
+ X = X ^ RefVal::ErrorGCLeakReturned;
+
+          // Flag this as an error so the leak is reported below.
+ hasError = true;
+ }
+ else if (!RE.isOwned()) {
+ // Either we are using GC and the returned object is a CF type
+ // or we aren't using GC. In either case, we expect that the
+ // enclosing method is expected to return ownership.
+ hasError = true;
+ X = X ^ RefVal::ErrorLeakReturned;
+ }
+ }
+
+ if (hasError) {
+ // Generate an error node.
+ static int ReturnOwnLeakTag = 0;
+ state = state.set<RefBindings>(Sym, X);
+ ExplodedNode<GRState> *N =
+ Builder.generateNode(PostStmt(S, &ReturnOwnLeakTag), state, Pred);
+ if (N) {
+ CFRefReport *report =
+ new CFRefLeakReport(*static_cast<CFRefBug*>(leakAtReturn), *this,
+ N, Sym, Eng);
+ BR->EmitReport(report);
+ }
+ }
+ }
+ }
+ else if (X.isReturnedNotOwned()) {
+ const Decl *CD = &Eng.getStateManager().getCodeDecl();
+ if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ const RetainSummary &Summ = *Summaries.getMethodSummary(MD);
+ if (Summ.getRetEffect().isOwned()) {
+ // Trying to return a not owned object to a caller expecting an
+ // owned object.
+
+ static int ReturnNotOwnedForOwnedTag = 0;
+ state = state.set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
+ if (ExplodedNode<GRState> *N =
+ Builder.generateNode(PostStmt(S, &ReturnNotOwnedForOwnedTag),
+ state, Pred)) {
+ CFRefReport *report =
+ new CFRefReport(*static_cast<CFRefBug*>(returnNotOwnedForOwned),
+ *this, N, Sym);
+ BR->EmitReport(report);
+ }
+ }
+ }
+ }
+}
+
+// Assumptions.
+
+const GRState* CFRefCount::EvalAssume(GRStateManager& VMgr,
+ const GRState* St,
+ SVal Cond, bool Assumption,
+ bool& isFeasible) {
+
+ // FIXME: We may add to the interface of EvalAssume the list of symbols
+ // whose assumptions have changed. For now we just iterate through the
+ // bindings and check if any of the tracked symbols are NULL. This isn't
+  // too bad since the number of symbols we will track in practice is
+  // probably small and EvalAssume is only called at branches and a few
+ // other places.
+ RefBindings B = St->get<RefBindings>();
+
+ if (B.isEmpty())
+ return St;
+
+ bool changed = false;
+
+ GRStateRef state(St, VMgr);
+ RefBindings::Factory& RefBFactory = state.get_context<RefBindings>();
+
+ for (RefBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ // Check if the symbol is null (or equal to any constant).
+ // If this is the case, stop tracking the symbol.
+ if (VMgr.getSymVal(St, I.getKey())) {
+ changed = true;
+ B = RefBFactory.Remove(B, I.getKey());
+ }
+ }
+
+ if (changed)
+ state = state.set<RefBindings>(B);
+
+ return state;
+}
+
+GRStateRef CFRefCount::Update(GRStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E,
+ RefVal::Kind& hasErr) {
+
+ // In GC mode [... release] and [... retain] do nothing.
+ switch (E) {
+ default: break;
+ case IncRefMsg: E = isGCEnabled() ? DoNothing : IncRef; break;
+ case DecRefMsg: E = isGCEnabled() ? DoNothing : DecRef; break;
+ case MakeCollectable: E = isGCEnabled() ? DecRef : DoNothing; break;
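+    // (CFMakeCollectable hands the object to the garbage collector, so under
+    //  GC the checker models it as a release; without GC it is a no-op.)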
+ case NewAutoreleasePool: E = isGCEnabled() ? DoNothing :
+ NewAutoreleasePool; break;
+ }
+
+ // Handle all use-after-releases.
+ if (!isGCEnabled() && V.getKind() == RefVal::Released) {
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ return state.set<RefBindings>(sym, V);
+ }
+
+ switch (E) {
+ default:
+ assert (false && "Unhandled CFRef transition.");
+
+ case Dealloc:
+ // Any use of -dealloc in GC is *bad*.
+ if (isGCEnabled()) {
+ V = V ^ RefVal::ErrorDeallocGC;
+ hasErr = V.getKind();
+ break;
+ }
+
+ switch (V.getKind()) {
+ default:
+ assert(false && "Invalid case.");
+ case RefVal::Owned:
+ // The object immediately transitions to the released state.
+ V = V ^ RefVal::Released;
+ V.clearCounts();
+ return state.set<RefBindings>(sym, V);
+ case RefVal::NotOwned:
+ V = V ^ RefVal::ErrorDeallocNotOwned;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+
+ case NewAutoreleasePool:
+ assert(!isGCEnabled());
+ return state.add<AutoreleaseStack>(sym);
+
+ case MayEscape:
+ if (V.getKind() == RefVal::Owned) {
+ V = V ^ RefVal::NotOwned;
+ break;
+ }
+
+ // Fall-through.
+
+ case DoNothingByRef:
+ case DoNothing:
+ return state;
+
+ case Autorelease:
+ if (isGCEnabled())
+ return state;
+
+ // Update the autorelease counts.
+ state = SendAutorelease(state, ARCountFactory, sym);
+ V = V.autorelease();
+ break;
+
+ case StopTracking:
+ return state.remove<RefBindings>(sym);
+
+ case IncRef:
+ switch (V.getKind()) {
+ default:
+ assert(false);
+
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ V = V + 1;
+ break;
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(isGCEnabled());
+ V = (V ^ RefVal::Owned) + 1;
+ break;
+ }
+ break;
+
+ case SelfOwn:
+ V = V ^ RefVal::NotOwned;
+ // Fall-through.
+ case DecRef:
+ switch (V.getKind()) {
+ default:
+ // case 'RefVal::Released' handled above.
+ assert (false);
+
+ case RefVal::Owned:
+ assert(V.getCount() > 0);
+ if (V.getCount() == 1) V = V ^ RefVal::Released;
+ V = V - 1;
+ break;
+
+ case RefVal::NotOwned:
+ if (V.getCount() > 0)
+ V = V - 1;
+ else {
+ V = V ^ RefVal::ErrorReleaseNotOwned;
+ hasErr = V.getKind();
+ }
+ break;
+
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(isGCEnabled());
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+ }
+ return state.set<RefBindings>(sym, V);
+}
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+std::pair<ExplodedNode<GRState>*, GRStateRef>
+CFRefCount::HandleAutoreleaseCounts(GRStateRef state, GenericNodeBuilder Bd,
+ ExplodedNode<GRState>* Pred,
+ GRExprEngine &Eng,
+ SymbolRef Sym, RefVal V, bool &stop) {
+
+ unsigned ACnt = V.getAutoreleaseCount();
+ stop = false;
+
+ // No autorelease counts? Nothing to be done.
+ if (!ACnt)
+ return std::make_pair(Pred, state);
+
+ assert(!isGCEnabled() && "Autorelease counts in GC mode?");
+ unsigned Cnt = V.getCount();
+
+ // FIXME: Handle sending 'autorelease' to already released object.
+
+ if (V.getKind() == RefVal::ReturnedOwned)
+ ++Cnt;
+
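+  // If the pending autoreleases do not exceed the effective retain count,
+  // they simply consume retains; otherwise the object will be over-released
+  // once the autorelease pool drains.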
+ if (ACnt <= Cnt) {
+ if (ACnt == Cnt) {
+ V.clearCounts();
+ if (V.getKind() == RefVal::ReturnedOwned)
+ V = V ^ RefVal::ReturnedNotOwned;
+ else
+ V = V ^ RefVal::NotOwned;
+ }
+ else {
+ V.setCount(Cnt - ACnt);
+ V.setAutoreleaseCount(0);
+ }
+ state = state.set<RefBindings>(Sym, V);
+ ExplodedNode<GRState> *N = Bd.MakeNode(state, Pred);
+ stop = (N == 0);
+ return std::make_pair(N, state);
+ }
+
+  // Whoa! More autorelease counts than retain counts left.
+  // Emit a hard error.
+ stop = true;
+ V = V ^ RefVal::ErrorOverAutorelease;
+ state = state.set<RefBindings>(Sym, V);
+
+ if (ExplodedNode<GRState> *N = Bd.MakeNode(state, Pred)) {
+ N->markAsSink();
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Object over-autoreleased: object was sent -autorelease";
+ if (V.getAutoreleaseCount() > 1)
+      os << " " << V.getAutoreleaseCount() << " times";
+ os << " but the object has ";
+ if (V.getCount() == 0)
+ os << "zero (locally visible)";
+ else
+ os << "+" << V.getCount();
+ os << " retain counts";
+
+ CFRefReport *report =
+ new CFRefReport(*static_cast<CFRefBug*>(overAutorelease),
+ *this, N, Sym, os.str().c_str());
+ BR->EmitReport(report);
+ }
+
+ return std::make_pair((ExplodedNode<GRState>*)0, state);
+}
+
+GRStateRef
+CFRefCount::HandleSymbolDeath(GRStateRef state, SymbolRef sid, RefVal V,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked) {
+
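+  // A symbol leaks if it dies while still owned, or while it carries a
+  // positive retain count despite being not-owned or returned-owned.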
+ bool hasLeak = V.isOwned() ||
+ ((V.isNotOwned() || V.isReturnedOwned()) && V.getCount() > 0);
+
+ if (!hasLeak)
+ return state.remove<RefBindings>(sid);
+
+ Leaked.push_back(sid);
+ return state.set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode<GRState>*
+CFRefCount::ProcessLeaks(GRStateRef state,
+ llvm::SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilder &Builder,
+ GRExprEngine& Eng,
+ ExplodedNode<GRState> *Pred) {
+
+ if (Leaked.empty())
+ return Pred;
+
+ // Generate an intermediate node representing the leak point.
+ ExplodedNode<GRState> *N = Builder.MakeNode(state, Pred);
+
+ if (N) {
+ for (llvm::SmallVectorImpl<SymbolRef>::iterator
+ I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+ CFRefBug *BT = static_cast<CFRefBug*>(Pred ? leakWithinFunction
+ : leakAtReturn);
+ assert(BT && "BugType not initialized.");
+ CFRefLeakReport* report = new CFRefLeakReport(*BT, *this, N, *I, Eng);
+ BR->EmitReport(report);
+ }
+ }
+
+ return N;
+}
+
+void CFRefCount::EvalEndPath(GRExprEngine& Eng,
+ GREndPathNodeBuilder<GRState>& Builder) {
+
+ GRStateRef state(Builder.getState(), Eng.getStateManager());
+ GenericNodeBuilder Bd(Builder);
+ RefBindings B = state.get<RefBindings>();
+ ExplodedNode<GRState> *Pred = 0;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ bool stop = false;
+ llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+ (*I).first,
+ (*I).second, stop);
+
+ if (stop)
+ return;
+ }
+
+ B = state.get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ state = HandleSymbolDeath(state, (*I).first, (*I).second, Leaked);
+
+ ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+}
+
+void CFRefCount::EvalDeadSymbols(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ExplodedNode<GRState>* Pred,
+ Stmt* S,
+ const GRState* St,
+ SymbolReaper& SymReaper) {
+
+ GRStateRef state(St, Eng.getStateManager());
+ RefBindings B = state.get<RefBindings>();
+
+ // Update counts from autorelease pools
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ if (const RefVal* T = B.lookup(Sym)){
+ // Use the symbol as the tag.
+ // FIXME: This might not be as unique as we would like.
+ GenericNodeBuilder Bd(Builder, S, Sym);
+ bool stop = false;
+ llvm::tie(Pred, state) = HandleAutoreleaseCounts(state, Bd, Pred, Eng,
+ Sym, *T, stop);
+ if (stop)
+ return;
+ }
+ }
+
+ B = state.get<RefBindings>();
+ llvm::SmallVector<SymbolRef, 10> Leaked;
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ if (const RefVal* T = B.lookup(*I))
+ state = HandleSymbolDeath(state, *I, *T, Leaked);
+ }
+
+ static unsigned LeakPPTag = 0;
+ {
+ GenericNodeBuilder Bd(Builder, S, &LeakPPTag);
+ Pred = ProcessLeaks(state, Leaked, Bd, Eng, Pred);
+ }
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Now generate a new node that nukes the old bindings.
+ RefBindings::Factory& F = state.get_context<RefBindings>();
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I!=E; ++I) B = F.Remove(B, *I);
+
+ state = state.set<RefBindings>(B);
+ Builder.MakeNode(Dst, S, Pred, state);
+}
+
+void CFRefCount::ProcessNonLeakError(ExplodedNodeSet<GRState>& Dst,
+ GRStmtNodeBuilder<GRState>& Builder,
+ Expr* NodeExpr, Expr* ErrorExpr,
+ ExplodedNode<GRState>* Pred,
+ const GRState* St,
+ RefVal::Kind hasErr, SymbolRef Sym) {
+ Builder.BuildSinks = true;
+ GRExprEngine::NodeTy* N = Builder.MakeNode(Dst, NodeExpr, Pred, St);
+
+ if (!N)
+ return;
+
+ CFRefBug *BT = 0;
+
+ switch (hasErr) {
+ default:
+ assert(false && "Unhandled error.");
+ return;
+ case RefVal::ErrorUseAfterRelease:
+ BT = static_cast<CFRefBug*>(useAfterRelease);
+ break;
+ case RefVal::ErrorReleaseNotOwned:
+ BT = static_cast<CFRefBug*>(releaseNotOwned);
+ break;
+ case RefVal::ErrorDeallocGC:
+ BT = static_cast<CFRefBug*>(deallocGC);
+ break;
+ case RefVal::ErrorDeallocNotOwned:
+ BT = static_cast<CFRefBug*>(deallocNotOwned);
+ break;
+ }
+
+ CFRefReport *report = new CFRefReport(*BT, *this, N, Sym);
+ report->addRange(ErrorExpr->getSourceRange());
+ BR->EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function creation for external clients.
+//===----------------------------------------------------------------------===//
+
+GRTransferFuncs* clang::MakeCFRefCountTF(ASTContext& Ctx, bool GCEnabled,
+ const LangOptions& lopts) {
+ return new CFRefCount(Ctx, GCEnabled, lopts);
+}
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
new file mode 100644
index 0000000..9e8248f
--- /dev/null
+++ b/lib/Analysis/CMakeLists.txt
@@ -0,0 +1,36 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangAnalysis
+ BasicConstraintManager.cpp
+ BasicObjCFoundationChecks.cpp
+ BasicStore.cpp
+ BasicValueFactory.cpp
+ BugReporter.cpp
+ CFRefCount.cpp
+ CheckDeadStores.cpp
+ CheckNSError.cpp
+ CheckObjCDealloc.cpp
+ CheckObjCInstMethSignature.cpp
+ CheckObjCUnusedIVars.cpp
+ Environment.cpp
+ ExplodedGraph.cpp
+ GRBlockCounter.cpp
+ GRCoreEngine.cpp
+ GRExprEngine.cpp
+ GRExprEngineInternalChecks.cpp
+ GRSimpleVals.cpp
+ GRState.cpp
+ GRTransferFuncs.cpp
+ LiveVariables.cpp
+ MemRegion.cpp
+ PathDiagnostic.cpp
+ RangeConstraintManager.cpp
+ RegionStore.cpp
+ SimpleConstraintManager.cpp
+ Store.cpp
+ SVals.cpp
+ SymbolManager.cpp
+ UninitializedValues.cpp
+ )
+
+add_dependencies(clangAnalysis ClangDiagnosticAnalysis)
diff --git a/lib/Analysis/CheckDeadStores.cpp b/lib/Analysis/CheckDeadStores.cpp
new file mode 100644
index 0000000..69433d6
--- /dev/null
+++ b/lib/Analysis/CheckDeadStores.cpp
@@ -0,0 +1,259 @@
+//==- DeadStores.cpp - Check for stores to dead variables --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the dead-store checker, a flow-sensitive check that looks
+// for stores to variables that are no longer live.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+namespace {
+
+class VISIBILITY_HIDDEN DeadStoreObs : public LiveVariables::ObserverTy {
+ ASTContext &Ctx;
+ BugReporter& BR;
+ ParentMap& Parents;
+ llvm::SmallPtrSet<VarDecl*, 20> Escaped;
+
+ enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
+
+public:
+ DeadStoreObs(ASTContext &ctx, BugReporter& br, ParentMap& parents,
+ llvm::SmallPtrSet<VarDecl*, 20> &escaped)
+ : Ctx(ctx), BR(br), Parents(parents), Escaped(escaped) {}
+
+ virtual ~DeadStoreObs() {}
+
+ void Report(VarDecl* V, DeadStoreKind dsk, SourceLocation L, SourceRange R) {
+ if (Escaped.count(V))
+ return;
+
+ std::string name = V->getNameAsString();
+
+ const char* BugType = 0;
+ std::string msg;
+
+ switch (dsk) {
+ default:
+ assert(false && "Impossible dead store type.");
+
+ case DeadInit:
+ BugType = "Dead initialization";
+ msg = "Value stored to '" + name +
+ "' during its initialization is never read";
+ break;
+
+ case DeadIncrement:
+ BugType = "Dead increment";
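+      // Fall-through: the 'Standard' case builds the diagnostic message.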
+ case Standard:
+ if (!BugType) BugType = "Dead assignment";
+ msg = "Value stored to '" + name + "' is never read";
+ break;
+
+ case Enclosing:
+ BugType = "Dead nested assignment";
+ msg = "Although the value stored to '" + name +
+ "' is used in the enclosing expression, the value is never actually"
+ " read from '" + name + "'";
+ break;
+ }
+
+ BR.EmitBasicReport(BugType, "Dead store", msg.c_str(), L, R);
+ }
+
+ void CheckVarDecl(VarDecl* VD, Expr* Ex, Expr* Val,
+ DeadStoreKind dsk,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+
+ if (VD->hasLocalStorage() && !Live(VD, AD) && !VD->getAttr<UnusedAttr>())
+ Report(VD, dsk, Ex->getSourceRange().getBegin(),
+ Val->getSourceRange());
+ }
+
+ void CheckDeclRef(DeclRefExpr* DR, Expr* Val, DeadStoreKind dsk,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ CheckVarDecl(VD, DR, Val, dsk, AD, Live);
+ }
+
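+  // Returns true if 'B' is effectively an increment of 'VD': either a
+  // compound assignment (e.g. 'x += 1') or an assignment whose right-hand
+  // side is a binary expression mentioning 'VD' (e.g. 'x = x + 1').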
+ bool isIncrement(VarDecl* VD, BinaryOperator* B) {
+ if (B->isCompoundAssignmentOp())
+ return true;
+
+ Expr* RHS = B->getRHS()->IgnoreParenCasts();
+ BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
+
+ if (!BRHS)
+ return false;
+
+ DeclRefExpr *DR;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ return false;
+ }
+
+ virtual void ObserveStmt(Stmt* S,
+ const LiveVariables::AnalysisDataTy& AD,
+ const LiveVariables::ValTy& Live) {
+
+ // Skip statements in macros.
+ if (S->getLocStart().isMacroID())
+ return;
+
+ if (BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp()) return; // Skip non-assignments.
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(B->getLHS()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ Expr* RHS = B->getRHS()->IgnoreParenCasts();
+
+ // Special case: check for assigning null to a pointer.
+ // This is a common form of defensive programming.
+ if (VD->getType()->isPointerType()) {
+ if (IntegerLiteral* L = dyn_cast<IntegerLiteral>(RHS))
+ // FIXME: Probably should have an Expr::isNullPointerConstant.
+ if (L->getValue() == 0)
+ return;
+ }
+ // Special case: self-assignments. These are often used to shut up
+ // "unused variable" compiler warnings.
+ if (DeclRefExpr* RhsDR = dyn_cast<DeclRefExpr>(RHS))
+ if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
+ return;
+
+ // Otherwise, issue a warning.
+ DeadStoreKind dsk = Parents.isConsumedExpr(B)
+ ? Enclosing
+ : (isIncrement(VD,B) ? DeadIncrement : Standard);
+
+ CheckVarDecl(VD, DR, B->getRHS(), dsk, AD, Live);
+ }
+ }
+ else if (UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
+ if (!U->isIncrementOp())
+ return;
+
+      // Handle: ++x within a subexpression.  The solution is to not warn
+ // about preincrements to dead variables when the preincrement occurs
+ // as a subexpression. This can lead to false negatives, e.g. "(++x);"
+ // A generalized dead code checker should find such issues.
+ if (U->isPrefix() && Parents.isConsumedExpr(U))
+ return;
+
+ Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(Ex))
+ CheckDeclRef(DR, U, DeadIncrement, AD, Live);
+ }
+ else if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
+ // Iterate through the decls. Warn if any initializers are complex
+ // expressions that are not live (never used).
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+ DI != DE; ++DI) {
+
+ VarDecl* V = dyn_cast<VarDecl>(*DI);
+
+ if (!V)
+ continue;
+
+ if (V->hasLocalStorage())
+ if (Expr* E = V->getInit()) {
+ // A dead initialization is a variable that is dead after it
+ // is initialized. We don't flag warnings for those variables
+ // marked 'unused'.
+ if (!Live(V, AD) && V->getAttr<UnusedAttr>() == 0) {
+ // Special case: check for initializations with constants.
+ //
+ // e.g. : int x = 0;
+ //
+ // If x is EVER assigned a new value later, don't issue
+ // a warning. This is because such initialization can be
+ // due to defensive programming.
+ if (E->isConstantInitializer(Ctx))
+ return;
+
+ // Special case: check for initializations from constant
+ // variables.
+ //
+ // e.g. extern const int MyConstant;
+ // int x = MyConstant;
+ //
+ if (DeclRefExpr *DRE=dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasGlobalStorage() &&
+ VD->getType().isConstQualified()) return;
+
+ Report(V, DeadInit, V->getLocation(), E->getSourceRange());
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Driver function to invoke the Dead-Stores checker on a CFG.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN FindEscaped : public CFGRecStmtDeclVisitor<FindEscaped>{
+ CFG *cfg;
+public:
+ FindEscaped(CFG *c) : cfg(c) {}
+
+ CFG& getCFG() { return *cfg; }
+
+ llvm::SmallPtrSet<VarDecl*, 20> Escaped;
+
+ void VisitUnaryOperator(UnaryOperator* U) {
+    // Check for '&'.  Any VarDecl whose address is taken is treated as
+    // escaped.
+ Expr* E = U->getSubExpr()->IgnoreParenCasts();
+ if (U->getOpcode() == UnaryOperator::AddrOf)
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ Escaped.insert(VD);
+ return;
+ }
+ Visit(E);
+ }
+};
+} // end anonymous namespace
+
+
+void clang::CheckDeadStores(LiveVariables& L, BugReporter& BR) {
+ FindEscaped FS(BR.getCFG());
+ FS.getCFG().VisitBlockStmts(FS);
+ DeadStoreObs A(BR.getContext(), BR, BR.getParentMap(), FS.Escaped);
+ L.runOnAllBlocks(*BR.getCFG(), &A);
+}
diff --git a/lib/Analysis/CheckNSError.cpp b/lib/Analysis/CheckNSError.cpp
new file mode 100644
index 0000000..ff9da0f
--- /dev/null
+++ b/lib/Analysis/CheckNSError.cpp
@@ -0,0 +1,231 @@
+//=- CheckNSError.cpp - Coding conventions for uses of NSError ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckNSError, a flow-insensitive check that determines
+// whether a method or function accepting an NSError** or CFErrorRef* parameter
+// has a non-void return type, as the coding conventions require.
+//
+// File under feature request PR 2600.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "BasicObjCFoundationChecks.h"
+#include "llvm/Support/Compiler.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+
+namespace {
+class VISIBILITY_HIDDEN NSErrorCheck : public BugType {
+ const bool isNSErrorWarning;
+ IdentifierInfo * const II;
+ GRExprEngine &Eng;
+
+ void CheckSignature(ObjCMethodDecl& MD, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
+
+ void CheckSignature(FunctionDecl& MD, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams);
+
+ bool CheckNSErrorArgument(QualType ArgTy);
+ bool CheckCFErrorArgument(QualType ArgTy);
+
+ void CheckParamDeref(VarDecl* V, GRStateRef state, BugReporter& BR);
+
+ void EmitRetTyWarning(BugReporter& BR, Decl& CodeDecl);
+
+public:
+ NSErrorCheck(bool isNSError, GRExprEngine& eng)
+ : BugType(isNSError ? "NSError** null dereference"
+ : "CFErrorRef* null dereference",
+ "Coding Conventions (Apple)"),
+ isNSErrorWarning(isNSError),
+ II(&eng.getContext().Idents.get(isNSErrorWarning ? "NSError":"CFErrorRef")),
+ Eng(eng) {}
+
+ void FlushReports(BugReporter& BR);
+};
+
+} // end anonymous namespace
+
+void clang::RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng) {
+ BR.Register(new NSErrorCheck(true, Eng));
+ BR.Register(new NSErrorCheck(false, Eng));
+}
+
+void NSErrorCheck::FlushReports(BugReporter& BR) {
+ // Get the analysis engine and the exploded analysis graph.
+ GRExprEngine::GraphTy& G = Eng.getGraph();
+
+ // Get the declaration of the method/function that was analyzed.
+ Decl& CodeDecl = G.getCodeDecl();
+
+ // Get the ASTContext, which is useful for querying type information.
+ ASTContext &Ctx = BR.getContext();
+
+ QualType ResultTy;
+ llvm::SmallVector<VarDecl*, 5> ErrorParams;
+
+ if (ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(&CodeDecl))
+ CheckSignature(*MD, ResultTy, ErrorParams);
+ else if (FunctionDecl* FD = dyn_cast<FunctionDecl>(&CodeDecl))
+ CheckSignature(*FD, ResultTy, ErrorParams);
+ else
+ return;
+
+ if (ErrorParams.empty())
+ return;
+
+ if (ResultTy == Ctx.VoidTy) EmitRetTyWarning(BR, CodeDecl);
+
+ for (GRExprEngine::GraphTy::roots_iterator RI=G.roots_begin(),
+ RE=G.roots_end(); RI!=RE; ++RI) {
+ // Scan the parameters for an implicit null dereference.
+ for (llvm::SmallVectorImpl<VarDecl*>::iterator I=ErrorParams.begin(),
+ E=ErrorParams.end(); I!=E; ++I)
+ CheckParamDeref(*I, GRStateRef((*RI)->getState(),Eng.getStateManager()),
+ BR);
+
+ }
+}
+
+void NSErrorCheck::EmitRetTyWarning(BugReporter& BR, Decl& CodeDecl) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<ObjCMethodDecl>(CodeDecl))
+ os << "Method";
+ else
+ os << "Function";
+
+ os << " accepting ";
+ os << (isNSErrorWarning ? "NSError**" : "CFErrorRef*");
+ os << " should have a non-void return value to indicate whether or not an "
+        "error occurred.";
+
+ BR.EmitBasicReport(isNSErrorWarning
+ ? "Bad return type when passing NSError**"
+ : "Bad return type when passing CFError*",
+ getCategory().c_str(), os.str().c_str(),
+ CodeDecl.getLocation());
+}
+
+void
+NSErrorCheck::CheckSignature(ObjCMethodDecl& M, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
+
+ ResultTy = M.getResultType();
+
+ for (ObjCMethodDecl::param_iterator I=M.param_begin(),
+ E=M.param_end(); I!=E; ++I) {
+
+ QualType T = (*I)->getType();
+
+ if (isNSErrorWarning) {
+ if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
+ }
+ else if (CheckCFErrorArgument(T))
+ ErrorParams.push_back(*I);
+ }
+}
+
+void
+NSErrorCheck::CheckSignature(FunctionDecl& F, QualType& ResultTy,
+ llvm::SmallVectorImpl<VarDecl*>& ErrorParams) {
+
+ ResultTy = F.getResultType();
+
+ for (FunctionDecl::param_iterator I=F.param_begin(),
+ E=F.param_end(); I!=E; ++I) {
+
+ QualType T = (*I)->getType();
+
+ if (isNSErrorWarning) {
+ if (CheckNSErrorArgument(T)) ErrorParams.push_back(*I);
+ }
+ else if (CheckCFErrorArgument(T))
+ ErrorParams.push_back(*I);
+ }
+}
+
+
+bool NSErrorCheck::CheckNSErrorArgument(QualType ArgTy) {
+
+ const PointerType* PPT = ArgTy->getAsPointerType();
+ if (!PPT) return false;
+
+ const PointerType* PT = PPT->getPointeeType()->getAsPointerType();
+ if (!PT) return false;
+
+ const ObjCInterfaceType *IT =
+ PT->getPointeeType()->getAsObjCInterfaceType();
+
+ if (!IT) return false;
+ return IT->getDecl()->getIdentifier() == II;
+}
+
+bool NSErrorCheck::CheckCFErrorArgument(QualType ArgTy) {
+
+ const PointerType* PPT = ArgTy->getAsPointerType();
+ if (!PPT) return false;
+
+ const TypedefType* TT = PPT->getPointeeType()->getAsTypedefType();
+ if (!TT) return false;
+
+ return TT->getDecl()->getIdentifier() == II;
+}
+
+void NSErrorCheck::CheckParamDeref(VarDecl* Param, GRStateRef rootState,
+ BugReporter& BR) {
+
+ SVal ParamL = rootState.GetLValue(Param);
+ const MemRegion* ParamR = cast<loc::MemRegionVal>(ParamL).getRegionAs<VarRegion>();
+ assert (ParamR && "Parameters always have VarRegions.");
+ SVal ParamSVal = rootState.GetSVal(ParamR);
+
+ // FIXME: For now assume that ParamSVal is symbolic. We need to generalize
+ // this later.
+ SymbolRef ParamSym = ParamSVal.getAsLocSymbol();
+ if (!ParamSym)
+ return;
+
+ // Iterate over the implicit-null dereferences.
+ for (GRExprEngine::null_deref_iterator I=Eng.implicit_null_derefs_begin(),
+ E=Eng.implicit_null_derefs_end(); I!=E; ++I) {
+
+ GRStateRef state = GRStateRef((*I)->getState(), Eng.getStateManager());
+ const SVal* X = state.get<GRState::NullDerefTag>();
+
+ if (!X || X->getAsSymbol() != ParamSym)
+ continue;
+
+ // Emit an error.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Potential null dereference. According to coding standards ";
+
+ if (isNSErrorWarning)
+ os << "in 'Creating and Returning NSError Objects' the parameter '";
+ else
+ os << "documented in CoreFoundation/CFError.h the parameter '";
+
+ os << Param->getNameAsString() << "' may be null.";
+
+ BugReport *report = new BugReport(*this, os.str().c_str(), *I);
+ // FIXME: Notable symbols are now part of the report. We should
+ // add support for notable symbols in BugReport.
+ // BR.addNotableSymbol(SV->getSymbol());
+ BR.EmitReport(report);
+ }
+}
diff --git a/lib/Analysis/CheckObjCDealloc.cpp b/lib/Analysis/CheckObjCDealloc.cpp
new file mode 100644
index 0000000..f50d7a1
--- /dev/null
+++ b/lib/Analysis/CheckObjCDealloc.cpp
@@ -0,0 +1,257 @@
+//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCDealloc, a checker that
+// analyzes an Objective-C class's implementation to determine if it
+// correctly implements -dealloc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static bool scan_dealloc(Stmt* S, Selector Dealloc) {
+
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Dealloc)
+ if(ME->getReceiver())
+ if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
+ return isa<ObjCSuperExpr>(Receiver);
+
+ // Recurse to children.
+
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_dealloc(*I, Dealloc))
+ return true;
+
+ return false;
+}
+
+static bool scan_ivar_release(Stmt* S, ObjCIvarDecl* ID,
+ const ObjCPropertyDecl* PD,
+ Selector Release,
+ IdentifierInfo* SelfII,
+ ASTContext& Ctx) {
+
+ // [mMyIvar release]
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Release)
+ if(ME->getReceiver())
+ if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
+ if (ObjCIvarRefExpr* E = dyn_cast<ObjCIvarRefExpr>(Receiver))
+ if (E->getDecl() == ID)
+ return true;
+
+ // [self setMyIvar:nil];
+ if (ObjCMessageExpr* ME = dyn_cast<ObjCMessageExpr>(S))
+ if(ME->getReceiver())
+ if (Expr* Receiver = ME->getReceiver()->IgnoreParenCasts())
+ if (DeclRefExpr* E = dyn_cast<DeclRefExpr>(Receiver))
+ if (E->getDecl()->getIdentifier() == SelfII)
+ if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
+ ME->getNumArgs() == 1 &&
+ ME->getArg(0)->isNullPointerConstant(Ctx))
+ return true;
+
+ // self.myIvar = nil;
+ if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
+ if (BO->isAssignmentOp())
+ if(ObjCPropertyRefExpr* PRE =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+ if(PRE->getProperty() == PD)
+ if(BO->getRHS()->isNullPointerConstant(Ctx)) {
+ // This is only a 'release' if the property kind is not
+ // 'assign'.
+            return PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ }
+
+ // Recurse to children.
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
+ return true;
+
+ return false;
+}
+
+void clang::CheckObjCDealloc(ObjCImplementationDecl* D,
+ const LangOptions& LOpts, BugReporter& BR) {
+
+ assert (LOpts.getGCMode() != LangOptions::GCOnly);
+
+ ASTContext& Ctx = BR.getContext();
+ ObjCInterfaceDecl* ID = D->getClassInterface();
+
+ // Does the class contain any ivars that are pointers (or id<...>)?
+ // If not, skip the check entirely.
+ // NOTE: This is motivated by PR 2517:
+ // http://llvm.org/bugs/show_bug.cgi?id=2517
+
+ bool containsPointerIvar = false;
+
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
+ I!=E; ++I) {
+
+ ObjCIvarDecl* ID = *I;
+ QualType T = ID->getType();
+
+ if (!Ctx.isObjCObjectPointerType(T) ||
+ ID->getAttr<IBOutletAttr>()) // Skip IBOutlets.
+ continue;
+
+ containsPointerIvar = true;
+ break;
+ }
+
+ if (!containsPointerIvar)
+ return;
+
+ // Determine if the class subclasses NSObject.
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
+
+
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+
+ // FIXME: For now, ignore classes that subclass SenTestCase, as these don't
+ // need to implement -dealloc. They implement tear down in another way,
+ // which we should try and catch later.
+ // http://llvm.org/bugs/show_bug.cgi?id=3187
+ if (II == SenTestCaseII)
+ return;
+ }
+
+ if (!ID)
+ return;
+
+ // Get the "dealloc" selector.
+ IdentifierInfo* II = &Ctx.Idents.get("dealloc");
+ Selector S = Ctx.Selectors.getSelector(0, &II);
+ ObjCMethodDecl* MD = 0;
+
+ // Scan the instance methods for "dealloc".
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(Ctx),
+ E = D->instmeth_end(Ctx); I!=E; ++I) {
+
+ if ((*I)->getSelector() == S) {
+ MD = *I;
+ break;
+ }
+ }
+
+ if (!MD) { // No dealloc found.
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing -dealloc"
+ : "missing -dealloc (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "Objective-C class '" << D->getNameAsString()
+ << "' lacks a 'dealloc' instance method";
+
+ BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
+ return;
+ }
+
+ // dealloc found. Scan for missing [super dealloc].
+ if (MD->getBody(Ctx) && !scan_dealloc(MD->getBody(Ctx), S)) {
+
+ const char* name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing [super dealloc]"
+ : "missing [super dealloc] (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "The 'dealloc' instance method in Objective-C class '"
+ << D->getNameAsString()
+ << "' does not send a 'dealloc' message to its super class"
+ " (missing [super dealloc])";
+
+ BR.EmitBasicReport(name, os.str().c_str(), D->getLocStart());
+ return;
+ }
+
+ // Get the "release" selector.
+ IdentifierInfo* RII = &Ctx.Idents.get("release");
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
+ // Get the "self" identifier
+ IdentifierInfo* SelfII = &Ctx.Idents.get("self");
+
+ // Scan for missing and extra releases of ivars used by implementations
+ // of synthesized properties
+ for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(Ctx),
+ E = D->propimpl_end(Ctx); I!=E; ++I) {
+
+ // We can only check the synthesized properties
+ if((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ ObjCIvarDecl* ID = (*I)->getPropertyIvarDecl();
+ if (!ID)
+ continue;
+
+ QualType T = ID->getType();
+ if (!Ctx.isObjCObjectPointerType(T)) // Skip non-pointer ivars
+ continue;
+
+ const ObjCPropertyDecl* PD = (*I)->getPropertyDecl();
+ if(!PD)
+ continue;
+
+ // ivars cannot be set via read-only properties, so we'll skip them
+ if(PD->isReadOnly())
+ continue;
+
+ // ivar must be released if and only if the kind of setter was not 'assign'
+ bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ if(scan_ivar_release(MD->getBody(Ctx), ID, PD, RS, SelfII, Ctx)
+ != requiresRelease) {
+ const char *name;
+ const char* category = "Memory (Core Foundation/Objective-C)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+
+ if(requiresRelease) {
+ name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "missing ivar release (leak)"
+ : "missing ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << ID->getNameAsString()
+ << "' instance variable was retained by a synthesized property but "
+ "wasn't released in 'dealloc'";
+ } else {
+ name = LOpts.getGCMode() == LangOptions::NonGC
+ ? "extra ivar release (use-after-release)"
+ : "extra ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << ID->getNameAsString()
+ << "' instance variable was not retained by a synthesized property "
+ "but was released in 'dealloc'";
+ }
+
+ BR.EmitBasicReport(name, category,
+ os.str().c_str(), (*I)->getLocation());
+ }
+ }
+}
+
diff --git a/lib/Analysis/CheckObjCInstMethSignature.cpp b/lib/Analysis/CheckObjCInstMethSignature.cpp
new file mode 100644
index 0000000..9fec7c1
--- /dev/null
+++ b/lib/Analysis/CheckObjCInstMethSignature.cpp
@@ -0,0 +1,120 @@
+//=- CheckObjCInstMethodRetTy.cpp - Check ObjC method signatures -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCInstMethSignature, a flow-insensitive check
+// that determines if an Objective-C class interface incorrectly redefines
+// the method signature in a subclass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
+ ASTContext& C) {
+
+ // Right now don't compare the compatibility of pointers. That involves
+ // looking at subtyping relationships. FIXME: Future patch.
+ if ((Derived->isPointerType() || Derived->isObjCQualifiedIdType()) &&
+ (Ancestor->isPointerType() || Ancestor->isObjCQualifiedIdType()))
+ return true;
+
+ return C.typesAreCompatible(Derived, Ancestor);
+}
+
+static void CompareReturnTypes(ObjCMethodDecl* MethDerived,
+ ObjCMethodDecl* MethAncestor,
+ BugReporter& BR, ASTContext& Ctx,
+ ObjCImplementationDecl* ID) {
+
+ QualType ResDerived = MethDerived->getResultType();
+ QualType ResAncestor = MethAncestor->getResultType();
+
+ if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "The Objective-C class '"
+ << MethDerived->getClassInterface()->getNameAsString()
+ << "', which is derived from class '"
+ << MethAncestor->getClassInterface()->getNameAsString()
+ << "', defines the instance method '"
+ << MethDerived->getSelector().getAsString()
+ << "' whose return type is '"
+ << ResDerived.getAsString()
+ << "'. A method with the same name (same selector) is also defined in "
+ "class '"
+ << MethAncestor->getClassInterface()->getNameAsString()
+ << "' and has a return type of '"
+ << ResAncestor.getAsString()
+ << "'. These two types are incompatible, and may result in undefined "
+ "behavior for clients of these classes.";
+
+ BR.EmitBasicReport("Incompatible instance method return type",
+ os.str().c_str(), MethDerived->getLocStart());
+ }
+}
+
+void clang::CheckObjCInstMethSignature(ObjCImplementationDecl* ID,
+ BugReporter& BR) {
+
+ ObjCInterfaceDecl* D = ID->getClassInterface();
+ ObjCInterfaceDecl* C = D->getSuperClass();
+
+ if (!C)
+ return;
+
+ ASTContext& Ctx = BR.getContext();
+
+ // Build a DenseMap of the methods for quick querying.
+ typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
+ MapTy IMeths;
+ unsigned NumMethods = 0;
+
+ for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(Ctx),
+ E=ID->instmeth_end(Ctx); I!=E; ++I) {
+
+ ObjCMethodDecl* M = *I;
+ IMeths[M->getSelector()] = M;
+ ++NumMethods;
+ }
+
+ // Now recurse the class hierarchy chain looking for methods with the
+ // same signatures.
+ while (C && NumMethods) {
+ for (ObjCInterfaceDecl::instmeth_iterator I=C->instmeth_begin(Ctx),
+ E=C->instmeth_end(Ctx); I!=E; ++I) {
+
+ ObjCMethodDecl* M = *I;
+ Selector S = M->getSelector();
+
+ MapTy::iterator MI = IMeths.find(S);
+
+ if (MI == IMeths.end() || MI->second == 0)
+ continue;
+
+ --NumMethods;
+ ObjCMethodDecl* MethDerived = MI->second;
+ MI->second = 0;
+
+ CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
+ }
+
+ C = C->getSuperClass();
+ }
+}
diff --git a/lib/Analysis/CheckObjCUnusedIVars.cpp b/lib/Analysis/CheckObjCUnusedIVars.cpp
new file mode 100644
index 0000000..7979f9c
--- /dev/null
+++ b/lib/Analysis/CheckObjCUnusedIVars.cpp
@@ -0,0 +1,111 @@
+//==- CheckObjCUnusedIVars.cpp - Check for unused ivars ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCUnusedIvars, a checker that
+// analyzes an Objective-C class's interface/implementation to determine if it
+// has any ivars that are never accessed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/DenseMap.h"
+#include <sstream>
+
+using namespace clang;
+
+enum IVarState { Unused, Used };
+typedef llvm::DenseMap<ObjCIvarDecl*,IVarState> IvarUsageMap;
+
+static void Scan(IvarUsageMap& M, Stmt* S) {
+ if (!S)
+ return;
+
+ if (ObjCIvarRefExpr* Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+ ObjCIvarDecl* D = Ex->getDecl();
+ IvarUsageMap::iterator I = M.find(D);
+ if (I != M.end()) I->second = Used;
+ return;
+ }
+
+ for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E;++I)
+ Scan(M, *I);
+}
+
+static void Scan(IvarUsageMap& M, ObjCPropertyImplDecl* D) {
+ if (!D)
+ return;
+
+ ObjCIvarDecl* ID = D->getPropertyIvarDecl();
+
+ if (!ID)
+ return;
+
+ IvarUsageMap::iterator I = M.find(ID);
+ if (I != M.end()) I->second = Used;
+}
+
+void clang::CheckObjCUnusedIvar(ObjCImplementationDecl* D, BugReporter& BR) {
+
+ ObjCInterfaceDecl* ID = D->getClassInterface();
+ IvarUsageMap M;
+
+
+ ASTContext &Ctx = BR.getContext();
+
+ // Iterate over the ivars.
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
+ I!=E; ++I) {
+
+ ObjCIvarDecl* ID = *I;
+
+ // Ignore ivars that aren't private.
+ if (ID->getAccessControl() != ObjCIvarDecl::Private)
+ continue;
+
+ // Skip IB Outlets.
+ if (ID->getAttr<IBOutletAttr>())
+ continue;
+
+ M[ID] = Unused;
+ }
+
+ if (M.empty())
+ return;
+
+ // Now scan the methods for accesses.
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(Ctx),
+ E = D->instmeth_end(Ctx); I!=E; ++I)
+ Scan(M, (*I)->getBody(Ctx));
+
+ // Scan for @synthesized property methods that act as setters/getters
+ // to an ivar.
+ for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(Ctx),
+ E = D->propimpl_end(Ctx); I!=E; ++I)
+ Scan(M, *I);
+
+ // Find ivars that are unused.
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+
+ std::ostringstream os;
+ os << "Instance variable '" << I->first->getNameAsString()
+ << "' in class '" << ID->getNameAsString()
+ << "' is never used by the methods in its @implementation "
+ "(although it may be used by category methods).";
+
+ BR.EmitBasicReport("Unused instance variable", "Optimization",
+ os.str().c_str(), I->first->getLocation());
+ }
+}
+
diff --git a/lib/Analysis/Environment.cpp b/lib/Analysis/Environment.cpp
new file mode 100644
index 0000000..2bc071a
--- /dev/null
+++ b/lib/Analysis/Environment.cpp
@@ -0,0 +1,167 @@
+//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+SVal Environment::GetSVal(Stmt* E, BasicValueFactory& BasicVals) const {
+
+ for (;;) {
+
+ switch (E->getStmtClass()) {
+
+ case Stmt::AddrLabelExprClass:
+ return Loc::MakeVal(cast<AddrLabelExpr>(E));
+
+ // ParenExprs are no-ops.
+
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ continue;
+
+ case Stmt::CharacterLiteralClass: {
+ CharacterLiteral* C = cast<CharacterLiteral>(E);
+ return NonLoc::MakeVal(BasicVals, C->getValue(), C->getType());
+ }
+
+ case Stmt::IntegerLiteralClass: {
+ return NonLoc::MakeVal(BasicVals, cast<IntegerLiteral>(E));
+ }
+
+ // Casts where the source and target type are the same
+ // are no-ops. We blast through these to get the descendant
+ // subexpression that has a value.
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass: {
+ CastExpr* C = cast<CastExpr>(E);
+ QualType CT = C->getType();
+
+ if (CT->isVoidType())
+ return UnknownVal();
+
+ break;
+ }
+
+ // Handle all other Stmt* using a lookup.
+
+ default:
+ break;
+ };
+
+ break;
+ }
+
+ return LookupExpr(E);
+}
+
+SVal Environment::GetBlkExprSVal(Stmt* E, BasicValueFactory& BasicVals) const {
+
+ while (1) {
+ switch (E->getStmtClass()) {
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ continue;
+
+ case Stmt::CharacterLiteralClass: {
+ CharacterLiteral* C = cast<CharacterLiteral>(E);
+ return NonLoc::MakeVal(BasicVals, C->getValue(), C->getType());
+ }
+
+ case Stmt::IntegerLiteralClass: {
+ return NonLoc::MakeVal(BasicVals, cast<IntegerLiteral>(E));
+ }
+
+ default:
+ return LookupBlkExpr(E);
+ }
+ }
+}
+
+Environment EnvironmentManager::BindExpr(const Environment& Env, Stmt* E,SVal V,
+ bool isBlkExpr, bool Invalidate) {
+ assert (E);
+
+ if (V.isUnknown()) {
+ if (Invalidate)
+ return isBlkExpr ? RemoveBlkExpr(Env, E) : RemoveSubExpr(Env, E);
+ else
+ return Env;
+ }
+
+ return isBlkExpr ? AddBlkExpr(Env, E, V) : AddSubExpr(Env, E, V);
+}
+
+namespace {
+class VISIBILITY_HIDDEN MarkLiveCallback : public SymbolVisitor {
+ SymbolReaper &SymReaper;
+public:
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ bool VisitSymbol(SymbolRef sym) { SymReaper.markLive(sym); return true; }
+};
+} // end anonymous namespace
+
+// RemoveDeadBindings:
+// - Remove subexpression bindings.
+// - Remove dead block expression bindings.
+// - Keep live block expression bindings:
+// - Mark their reachable symbols live in SymbolReaper,
+// see ScanReachableSymbols.
+// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
+
+Environment
+EnvironmentManager::RemoveDeadBindings(Environment Env, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ GRStateManager& StateMgr,
+ const GRState *state,
+ llvm::SmallVectorImpl<const MemRegion*>& DRoots) {
+
+ // Drop bindings for subexpressions.
+ Env = RemoveSubExprBindings(Env);
+
+ // Iterate over the block-expr bindings.
+ for (Environment::beb_iterator I = Env.beb_begin(), E = Env.beb_end();
+ I != E; ++I) {
+ Stmt* BlkExpr = I.getKey();
+
+ if (SymReaper.isLive(Loc, BlkExpr)) {
+ SVal X = I.getData();
+
+ // If the block expr's value is a memory region, then mark that region.
+ if (isa<loc::MemRegionVal>(X))
+ DRoots.push_back(cast<loc::MemRegionVal>(X).getRegion());
+
+ // Mark all symbols in the block expr's value live.
+ MarkLiveCallback cb(SymReaper);
+ StateMgr.scanReachableSymbols(X, state, cb);
+ } else {
+ // The block expr is dead.
+ SVal X = I.getData();
+
+      // Do not prematurely clean up the binding for a LogicalExpr or
+      // ConditionalOperator. Such an expression is dead at its own beginning,
+      // but we still need its UndefinedVal marker to determine its SVal.
+
+ if (X.isUndef() && cast<UndefinedVal>(X).getData())
+ continue;
+
+ Env = RemoveBlkExpr(Env, BlkExpr);
+ }
+ }
+
+ return Env;
+}
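+
+// A minimal caller-side sketch of RemoveDeadBindings (the names below are
+// hypothetical stand-ins for objects of the parameter types shown above):
+//
+//   llvm::SmallVector<const MemRegion*, 10> DRoots;
+//   Environment Cleaned =
+//       EnvMgr.RemoveDeadBindings(Env, CurrentStmt, SymReaper,
+//                                 StateMgr, state, DRoots);
+//   // Subexpression bindings and dead block-expr bindings are now gone;
+//   // DRoots lists the regions bound to surviving block-level expressions.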
diff --git a/lib/Analysis/ExplodedGraph.cpp b/lib/Analysis/ExplodedGraph.cpp
new file mode 100644
index 0000000..20de6c4
--- /dev/null
+++ b/lib/Analysis/ExplodedGraph.cpp
@@ -0,0 +1,241 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/ExplodedGraph.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out of line virtual method to provide a home for the class vtable.
+ExplodedNodeImpl::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNodeImpl::Auditor* NodeAuditor = 0;
+#endif
+
+void ExplodedNodeImpl::SetAuditor(ExplodedNodeImpl::Auditor* A) {
+#ifndef NDEBUG
+ NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedNodeImpl.
+//===----------------------------------------------------------------------===//
+
+static inline std::vector<ExplodedNodeImpl*>& getVector(void* P) {
+ return *reinterpret_cast<std::vector<ExplodedNodeImpl*>*>(P);
+}
+
+void ExplodedNodeImpl::addPredecessor(ExplodedNodeImpl* V) {
+ assert (!V->isSink());
+ Preds.addNode(V);
+ V->Succs.addNode(this);
+#ifndef NDEBUG
+ if (NodeAuditor) NodeAuditor->AddEdge(V, this);
+#endif
+}
+
+void ExplodedNodeImpl::NodeGroup::addNode(ExplodedNodeImpl* N) {
+
+ assert ((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
+ assert (!getFlag());
+
+ if (getKind() == Size1) {
+ if (ExplodedNodeImpl* NOld = getNode()) {
+ std::vector<ExplodedNodeImpl*>* V = new std::vector<ExplodedNodeImpl*>();
+ assert ((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
+ V->push_back(NOld);
+ V->push_back(N);
+ P = reinterpret_cast<uintptr_t>(V) | SizeOther;
+ assert (getPtr() == (void*) V);
+ assert (getKind() == SizeOther);
+ }
+ else {
+ P = reinterpret_cast<uintptr_t>(N);
+ assert (getKind() == Size1);
+ }
+ }
+ else {
+ assert (getKind() == SizeOther);
+ getVector(getPtr()).push_back(N);
+ }
+}
+
+
+unsigned ExplodedNodeImpl::NodeGroup::size() const {
+ if (getFlag())
+ return 0;
+
+ if (getKind() == Size1)
+ return getNode() ? 1 : 0;
+ else
+ return getVector(getPtr()).size();
+}
+
+ExplodedNodeImpl** ExplodedNodeImpl::NodeGroup::begin() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNodeImpl**) (getPtr() ? &P : NULL);
+ else
+ return const_cast<ExplodedNodeImpl**>(&*(getVector(getPtr()).begin()));
+}
+
+ExplodedNodeImpl** ExplodedNodeImpl::NodeGroup::end() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNodeImpl**) (getPtr() ? &P+1 : NULL);
+ else {
+ // Dereferencing end() is undefined behaviour. The vector is not empty, so
+ // we can dereference the last elem and then add 1 to the result.
+ return const_cast<ExplodedNodeImpl**>(&getVector(getPtr()).back()) + 1;
+ }
+}
+
+ExplodedNodeImpl::NodeGroup::~NodeGroup() {
+ if (getKind() == SizeOther) delete &getVector(getPtr());
+}
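+
+// Representation note (informal, derived from the code above): P is a tagged
+// pointer.  After the first addNode(N1) the group stores N1 directly (kind
+// Size1); a second addNode(N2) allocates a std::vector, fills it with
+// {N1, N2}, and stores the vector pointer tagged as SizeOther.  begin()/end()
+// therefore point either at &P itself or into that heap-allocated vector.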
+
+ExplodedGraphImpl*
+ExplodedGraphImpl::Trim(const ExplodedNodeImpl* const* BeginSources,
+ const ExplodedNodeImpl* const* EndSources,
+ InterExplodedGraphMapImpl* M,
+ llvm::DenseMap<const void*, const void*> *InverseMap)
+const {
+
+ typedef llvm::DenseSet<const ExplodedNodeImpl*> Pass1Ty;
+ Pass1Ty Pass1;
+
+ typedef llvm::DenseMap<const ExplodedNodeImpl*, ExplodedNodeImpl*> Pass2Ty;
+ Pass2Ty& Pass2 = M->M;
+
+ llvm::SmallVector<const ExplodedNodeImpl*, 10> WL1, WL2;
+
+ // ===- Pass 1 (reverse DFS) -===
+ for (const ExplodedNodeImpl* const* I = BeginSources; I != EndSources; ++I) {
+ assert(*I);
+ WL1.push_back(*I);
+ }
+
+  // Process the first worklist until it is empty.  We pop nodes off the back
+  // of the SmallVector, so this is a LIFO (depth-first) traversal.
+ while (!WL1.empty()) {
+ const ExplodedNodeImpl *N = WL1.back();
+ WL1.pop_back();
+
+ // Have we already visited this node? If so, continue to the next one.
+ if (Pass1.count(N))
+ continue;
+
+ // Otherwise, mark this node as visited.
+ Pass1.insert(N);
+
+ // If this is a root enqueue it to the second worklist.
+ if (N->Preds.empty()) {
+ WL2.push_back(N);
+ continue;
+ }
+
+ // Visit our predecessors and enqueue them.
+ for (ExplodedNodeImpl** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ WL1.push_back(*I);
+ }
+
+ // We didn't hit a root? Return with a null pointer for the new graph.
+ if (WL2.empty())
+ return 0;
+
+ // Create an empty graph.
+ ExplodedGraphImpl* G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
+ while (!WL2.empty()) {
+ const ExplodedNodeImpl* N = WL2.back();
+ WL2.pop_back();
+
+ // Skip this node if we have already processed it.
+ if (Pass2.find(N) != Pass2.end())
+ continue;
+
+ // Create the corresponding node in the new graph and record the mapping
+ // from the old node to the new node.
+ ExplodedNodeImpl* NewN = G->getNodeImpl(N->getLocation(), N->State, NULL);
+ Pass2[N] = NewN;
+
+ // Also record the reverse mapping from the new node to the old node.
+ if (InverseMap) (*InverseMap)[NewN] = N;
+
+ // If this node is a root, designate it as such in the graph.
+ if (N->Preds.empty())
+ G->addRoot(NewN);
+
+ // In the case that some of the intended predecessors of NewN have already
+ // been created, we should hook them up as predecessors.
+
+ // Walk through the predecessors of 'N' and hook up their corresponding
+ // nodes in the new graph (if any) to the freshly created node.
+ for (ExplodedNodeImpl **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI == Pass2.end())
+ continue;
+
+ NewN->addPredecessor(PI->second);
+ }
+
+ // In the case that some of the intended successors of NewN have already
+ // been created, we should hook them up as successors. Otherwise, enqueue
+ // the new nodes from the original graph that should have nodes created
+ // in the new graph.
+ for (ExplodedNodeImpl **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI != Pass2.end()) {
+ PI->second->addPredecessor(NewN);
+ continue;
+ }
+
+ // Enqueue nodes to the worklist that were marked during pass 1.
+ if (Pass1.count(*I))
+ WL2.push_back(*I);
+ }
+
+    // Finally, explicitly mark the new node as a sink if the original node
+    // was a sink.
+ if (N->isSink())
+ NewN->markAsSink();
+ }
+
+ return G;
+}
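+
+// Caller-side sketch of Trim (illustrative only; 'ErrorNode' is a node of
+// interest, e.g. where a bug was reported, and 'Map' is an
+// InterExplodedGraphMapImpl provided by the caller):
+//
+//   const ExplodedNodeImpl* Srcs[] = { ErrorNode };
+//   llvm::DenseMap<const void*, const void*> Inverse;
+//   ExplodedGraphImpl* Trimmed = G->Trim(Srcs, Srcs + 1, &Map, &Inverse);
+//   // 'Trimmed' (null if no root reaches ErrorNode) contains only nodes on
+//   // root-to-ErrorNode paths; Map/Inverse relate old and new nodes.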
+
+ExplodedNodeImpl*
+InterExplodedGraphMapImpl::getMappedImplNode(const ExplodedNodeImpl* N) const {
+ llvm::DenseMap<const ExplodedNodeImpl*, ExplodedNodeImpl*>::iterator I =
+ M.find(N);
+
+ return I == M.end() ? 0 : I->second;
+}
+
+InterExplodedGraphMapImpl::InterExplodedGraphMapImpl() {}
+
diff --git a/lib/Analysis/GRBlockCounter.cpp b/lib/Analysis/GRBlockCounter.cpp
new file mode 100644
index 0000000..f69a16d
--- /dev/null
+++ b/lib/Analysis/GRBlockCounter.cpp
@@ -0,0 +1,54 @@
+//==- GRBlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GRBlockCounter, an abstract data type used to count
+// the number of times a given block has been visited along a path
+// analyzed by GRCoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRBlockCounter.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+
+typedef llvm::ImmutableMap<unsigned,unsigned> CountMap;
+
+static inline CountMap GetMap(void* D) {
+ return CountMap(static_cast<CountMap::TreeTy*>(D));
+}
+
+static inline CountMap::Factory& GetFactory(void* F) {
+ return *static_cast<CountMap::Factory*>(F);
+}
+
+unsigned GRBlockCounter::getNumVisited(unsigned BlockID) const {
+ CountMap M = GetMap(Data);
+ CountMap::data_type* T = M.lookup(BlockID);
+ return T ? *T : 0;
+}
+
+GRBlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+ F = new CountMap::Factory(Alloc);
+}
+
+GRBlockCounter::Factory::~Factory() {
+ delete static_cast<CountMap::Factory*>(F);
+}
+
+GRBlockCounter
+GRBlockCounter::Factory::IncrementCount(GRBlockCounter BC, unsigned BlockID) {
+ return GRBlockCounter(GetFactory(F).Add(GetMap(BC.Data), BlockID,
+ BC.getNumVisited(BlockID)+1).getRoot());
+}
+
+GRBlockCounter
+GRBlockCounter::Factory::GetEmptyCounter() {
+ return GRBlockCounter(GetFactory(F).GetEmptyMap().getRoot());
+}
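+
+// Minimal usage sketch (hypothetical client code, grounded only in the
+// signatures defined above):
+//
+//   llvm::BumpPtrAllocator Alloc;
+//   GRBlockCounter::Factory F(Alloc);
+//   GRBlockCounter BC = F.GetEmptyCounter();
+//   BC = F.IncrementCount(BC, /*BlockID=*/3);
+//   unsigned N = BC.getNumVisited(3);  // N == 1; unvisited blocks report 0.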
diff --git a/lib/Analysis/GRCoreEngine.cpp b/lib/Analysis/GRCoreEngine.cpp
new file mode 100644
index 0000000..ff7b548
--- /dev/null
+++ b/lib/Analysis/GRCoreEngine.cpp
@@ -0,0 +1,576 @@
+//==- GRCoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic engine for intraprocedural, path-sensitive,
+// dataflow analysis built on a graph-reachability engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRCoreEngine.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+#include <queue>
+
+using llvm::cast;
+using llvm::isa;
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN DFS : public GRWorkList {
+ llvm::SmallVector<GRWorkListUnit,20> Stack;
+public:
+ virtual bool hasWork() const {
+ return !Stack.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ Stack.push_back(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+ assert (!Stack.empty());
+ const GRWorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+};
+
+class VISIBILITY_HIDDEN BFS : public GRWorkList {
+ std::queue<GRWorkListUnit> Queue;
+public:
+ virtual bool hasWork() const {
+ return !Queue.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ Queue.push(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+    // Don't use a const reference: the subsequent pop() would invalidate it.
+ GRWorkListUnit U = Queue.front();
+ Queue.pop();
+ return U;
+ }
+};
+
+} // end anonymous namespace
+
+// Place the destructor for GRWorkList here because the class has virtual
+// member functions, and we want the destructor's code emitted in a single
+// compilation unit.
+GRWorkList::~GRWorkList() {}
+
+GRWorkList *GRWorkList::MakeDFS() { return new DFS(); }
+GRWorkList *GRWorkList::MakeBFS() { return new BFS(); }
+
+namespace {
+ class VISIBILITY_HIDDEN BFSBlockDFSContents : public GRWorkList {
+ std::queue<GRWorkListUnit> Queue;
+ llvm::SmallVector<GRWorkListUnit,20> Stack;
+ public:
+ virtual bool hasWork() const {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ virtual void Enqueue(const GRWorkListUnit& U) {
+ if (isa<BlockEntrance>(U.getNode()->getLocation()))
+ Queue.push(U);
+ else
+ Stack.push_back(U);
+ }
+
+ virtual GRWorkListUnit Dequeue() {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const GRWorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+    // Don't use a const reference: the subsequent pop() would invalidate it.
+ GRWorkListUnit U = Queue.front();
+ Queue.pop();
+ return U;
+ }
+ };
+} // end anonymous namespace
+
+GRWorkList* GRWorkList::MakeBFSBlockDFSContents() {
+ return new BFSBlockDFSContents();
+}
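+
+// Sketch of the worklist contract (the driver loop here is illustrative only;
+// the real driver is GRCoreEngineImpl::ExecuteWorkList below):
+//
+//   GRWorkList* WL = GRWorkList::MakeDFS();  // or MakeBFS(), or
+//                                            // MakeBFSBlockDFSContents()
+//   while (WL->hasWork()) {
+//     GRWorkListUnit U = WL->Dequeue();
+//     // ...process U, possibly calling WL->Enqueue(NewUnit)...
+//   }
+//   delete WL;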
+
+//===----------------------------------------------------------------------===//
+// Core analysis engine.
+//===----------------------------------------------------------------------===//
+
+/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
+bool GRCoreEngineImpl::ExecuteWorkList(unsigned Steps) {
+
+ if (G->num_roots() == 0) { // Initialize the analysis by constructing
+ // the root if none exists.
+
+ CFGBlock* Entry = &getCFG().getEntry();
+
+ assert (Entry->empty() &&
+ "Entry block must be empty.");
+
+ assert (Entry->succ_size() == 1 &&
+ "Entry block must have 1 successor.");
+
+ // Get the solitary successor.
+ CFGBlock* Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the
+ // starting location in the function.
+ BlockEdge StartLoc(Entry, Succ);
+
+    // Set the current block counter to the empty counter.
+ WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+
+ // Generate the root.
+ GenerateNode(StartLoc, getInitialState(), 0);
+ }
+
+ while (Steps && WList->hasWork()) {
+ --Steps;
+ const GRWorkListUnit& WU = WList->Dequeue();
+
+ // Set the current block counter.
+ WList->setBlockCounter(WU.getBlockCounter());
+
+ // Retrieve the node.
+ ExplodedNodeImpl* Node = WU.getNode();
+
+ // Dispatch on the location type.
+ switch (Node->getLocation().getKind()) {
+ case ProgramPoint::BlockEdgeKind:
+ HandleBlockEdge(cast<BlockEdge>(Node->getLocation()), Node);
+ break;
+
+ case ProgramPoint::BlockEntranceKind:
+ HandleBlockEntrance(cast<BlockEntrance>(Node->getLocation()), Node);
+ break;
+
+ case ProgramPoint::BlockExitKind:
+        assert (false && "BlockExit locations never occur in forward analysis.");
+ break;
+
+ default:
+ assert(isa<PostStmt>(Node->getLocation()));
+ HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
+ WU.getIndex(), Node);
+ break;
+ }
+ }
+
+ return WList->hasWork();
+}
+
+void GRCoreEngineImpl::HandleBlockEdge(const BlockEdge& L,
+ ExplodedNodeImpl* Pred) {
+
+ CFGBlock* Blk = L.getDst();
+
+ // Check if we are entering the EXIT block.
+ if (Blk == &getCFG().getExit()) {
+
+ assert (getCFG().getExit().size() == 0
+ && "EXIT block cannot contain Stmts.");
+
+ // Process the final state transition.
+ GREndPathNodeBuilderImpl Builder(Blk, Pred, this);
+ ProcessEndPath(Builder);
+
+ // This path is done. Don't enqueue any more nodes.
+ return;
+ }
+
+ // FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
+
+ if (ProcessBlockEntrance(Blk, Pred->State, WList->getBlockCounter()))
+ GenerateNode(BlockEntrance(Blk), Pred->State, Pred);
+}
+
+void GRCoreEngineImpl::HandleBlockEntrance(const BlockEntrance& L,
+ ExplodedNodeImpl* Pred) {
+
+ // Increment the block counter.
+ GRBlockCounter Counter = WList->getBlockCounter();
+ Counter = BCounterFactory.IncrementCount(Counter, L.getBlock()->getBlockID());
+ WList->setBlockCounter(Counter);
+
+ // Process the entrance of the block.
+ if (Stmt* S = L.getFirstStmt()) {
+ GRStmtNodeBuilderImpl Builder(L.getBlock(), 0, Pred, this);
+ ProcessStmt(S, Builder);
+ }
+ else
+ HandleBlockExit(L.getBlock(), Pred);
+}
+
+GRCoreEngineImpl::~GRCoreEngineImpl() {
+ delete WList;
+}
+
+void GRCoreEngineImpl::HandleBlockExit(CFGBlock * B, ExplodedNodeImpl* Pred) {
+
+ if (Stmt* Term = B->getTerminator()) {
+ switch (Term->getStmtClass()) {
+ default:
+ assert(false && "Analysis for this terminator not implemented.");
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
+ return;
+
+ case Stmt::ConditionalOperatorClass:
+ HandleBranch(cast<ConditionalOperator>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ // FIXME: Use constant-folding in CFG construction to simplify this
+ // case.
+
+ case Stmt::ChooseExprClass:
+ HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::DoStmtClass:
+ HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ForStmtClass:
+ HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ContinueStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::GotoStmtClass:
+ break;
+
+ case Stmt::IfStmtClass:
+ HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::IndirectGotoStmtClass: {
+ // Only 1 successor: the indirect goto dispatch block.
+ assert (B->succ_size() == 1);
+
+ GRIndirectGotoNodeBuilderImpl
+ builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
+ *(B->succ_begin()), this);
+
+ ProcessIndirectGoto(builder);
+ return;
+ }
+
+ case Stmt::ObjCForCollectionStmtClass: {
+ // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
+ //
+ // (1) inside a basic block, which represents the binding of the
+ // 'element' variable to a value.
+ // (2) in a terminator, which represents the branch.
+ //
+      // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+      // whether or not the collection contains any more elements.  We cannot
+ // just test to see if the element is nil because a container can
+ // contain nil elements.
+ HandleBranch(Term, Term, B, Pred);
+ return;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ GRSwitchNodeBuilderImpl builder(Pred, B,
+ cast<SwitchStmt>(Term)->getCond(),
+ this);
+
+ ProcessSwitch(builder);
+ return;
+ }
+
+ case Stmt::WhileStmtClass:
+ HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+ }
+ }
+
+  assert (B->succ_size() == 1 &&
+          "Blocks with no terminator should have exactly 1 successor.");
+
+ GenerateNode(BlockEdge(B, *(B->succ_begin())), Pred->State, Pred);
+}
+
+void GRCoreEngineImpl::HandleBranch(Stmt* Cond, Stmt* Term, CFGBlock * B,
+ ExplodedNodeImpl* Pred) {
+ assert (B->succ_size() == 2);
+
+ GRBranchNodeBuilderImpl Builder(B, *(B->succ_begin()), *(B->succ_begin()+1),
+ Pred, this);
+
+ ProcessBranch(Cond, Term, Builder);
+}
+
+void GRCoreEngineImpl::HandlePostStmt(const PostStmt& L, CFGBlock* B,
+ unsigned StmtIdx, ExplodedNodeImpl* Pred) {
+
+ assert (!B->empty());
+
+ if (StmtIdx == B->size())
+ HandleBlockExit(B, Pred);
+ else {
+ GRStmtNodeBuilderImpl Builder(B, StmtIdx, Pred, this);
+ ProcessStmt((*B)[StmtIdx], Builder);
+ }
+}
+
+/// GenerateNode - Utility method to generate nodes, hook up successors,
+/// and add nodes to the worklist.
+void GRCoreEngineImpl::GenerateNode(const ProgramPoint& Loc, const void* State,
+ ExplodedNodeImpl* Pred) {
+
+ bool IsNew;
+ ExplodedNodeImpl* Node = G->getNodeImpl(Loc, State, &IsNew);
+
+ if (Pred)
+ Node->addPredecessor(Pred); // Link 'Node' with its predecessor.
+ else {
+ assert (IsNew);
+ G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
+ }
+
+ // Only add 'Node' to the worklist if it was freshly generated.
+ if (IsNew) WList->Enqueue(Node);
+}
+
+GRStmtNodeBuilderImpl::GRStmtNodeBuilderImpl(CFGBlock* b, unsigned idx,
+ ExplodedNodeImpl* N, GRCoreEngineImpl* e)
+ : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N) {
+ Deferred.insert(N);
+}
+
+GRStmtNodeBuilderImpl::~GRStmtNodeBuilderImpl() {
+ for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
+ if (!(*I)->isSink())
+ GenerateAutoTransition(*I);
+}
+
+void GRStmtNodeBuilderImpl::GenerateAutoTransition(ExplodedNodeImpl* N) {
+ assert (!N->isSink());
+
+ PostStmt Loc(getStmt());
+
+ if (Loc == N->getLocation()) {
+ // Note: 'N' should be a fresh node because otherwise it shouldn't be
+ // a member of Deferred.
+ Eng.WList->Enqueue(N, B, Idx+1);
+ return;
+ }
+
+ bool IsNew;
+ ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(Loc, N->State, &IsNew);
+ Succ->addPredecessor(N);
+
+ if (IsNew)
+ Eng.WList->Enqueue(Succ, B, Idx+1);
+}
+
+static inline PostStmt GetPostLoc(Stmt* S, ProgramPoint::Kind K,
+ const void *tag) {
+ switch (K) {
+ default:
+ assert(false && "Invalid PostXXXKind.");
+
+ case ProgramPoint::PostStmtKind:
+ return PostStmt(S, tag);
+
+ case ProgramPoint::PostLoadKind:
+ return PostLoad(S, tag);
+
+ case ProgramPoint::PostUndefLocationCheckFailedKind:
+ return PostUndefLocationCheckFailed(S, tag);
+
+ case ProgramPoint::PostLocationChecksSucceedKind:
+ return PostLocationChecksSucceed(S, tag);
+
+ case ProgramPoint::PostOutOfBoundsCheckFailedKind:
+ return PostOutOfBoundsCheckFailed(S, tag);
+
+ case ProgramPoint::PostNullCheckFailedKind:
+ return PostNullCheckFailed(S, tag);
+
+ case ProgramPoint::PostStoreKind:
+ return PostStore(S, tag);
+
+ case ProgramPoint::PostLValueKind:
+ return PostLValue(S, tag);
+
+ case ProgramPoint::PostPurgeDeadSymbolsKind:
+ return PostPurgeDeadSymbols(S, tag);
+ }
+}
+
+ExplodedNodeImpl*
+GRStmtNodeBuilderImpl::generateNodeImpl(Stmt* S, const void* State,
+ ExplodedNodeImpl* Pred,
+ ProgramPoint::Kind K,
+ const void *tag) {
+ return generateNodeImpl(GetPostLoc(S, K, tag), State, Pred);
+}
+
+ExplodedNodeImpl*
+GRStmtNodeBuilderImpl::generateNodeImpl(PostStmt Loc, const void* State,
+ ExplodedNodeImpl* Pred) {
+ bool IsNew;
+ ExplodedNodeImpl* N = Eng.G->getNodeImpl(Loc, State, &IsNew);
+ N->addPredecessor(Pred);
+ Deferred.erase(Pred);
+
+ if (IsNew) {
+ Deferred.insert(N);
+ LastNode = N;
+ return N;
+ }
+
+ LastNode = NULL;
+ return NULL;
+}
+
+ExplodedNodeImpl* GRBranchNodeBuilderImpl::generateNodeImpl(const void* State,
+ bool branch) {
+ bool IsNew;
+
+ ExplodedNodeImpl* Succ =
+ Eng.G->getNodeImpl(BlockEdge(Src, branch ? DstT : DstF), State, &IsNew);
+
+ Succ->addPredecessor(Pred);
+
+ if (branch) GeneratedTrue = true;
+ else GeneratedFalse = true;
+
+ if (IsNew) {
+ Deferred.push_back(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+GRBranchNodeBuilderImpl::~GRBranchNodeBuilderImpl() {
+ if (!GeneratedTrue) generateNodeImpl(Pred->State, true);
+ if (!GeneratedFalse) generateNodeImpl(Pred->State, false);
+
+ for (DeferredTy::iterator I=Deferred.begin(), E=Deferred.end(); I!=E; ++I)
+ if (!(*I)->isSink()) Eng.WList->Enqueue(*I);
+}
+
+
+ExplodedNodeImpl*
+GRIndirectGotoNodeBuilderImpl::generateNodeImpl(const Iterator& I,
+ const void* St,
+ bool isSink) {
+ bool IsNew;
+
+ ExplodedNodeImpl* Succ =
+ Eng.G->getNodeImpl(BlockEdge(Src, I.getBlock()), St, &IsNew);
+
+ Succ->addPredecessor(Pred);
+
+ if (IsNew) {
+
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->Enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNodeImpl*
+GRSwitchNodeBuilderImpl::generateCaseStmtNodeImpl(const Iterator& I,
+ const void* St) {
+
+ bool IsNew;
+
+ ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(BlockEdge(Src, I.getBlock()),
+ St, &IsNew);
+ Succ->addPredecessor(Pred);
+
+ if (IsNew) {
+ Eng.WList->Enqueue(Succ);
+ return Succ;
+ }
+
+ return NULL;
+}
+
+
+ExplodedNodeImpl*
+GRSwitchNodeBuilderImpl::generateDefaultCaseNodeImpl(const void* St,
+ bool isSink) {
+
+ // Get the block for the default case.
+ assert (Src->succ_rbegin() != Src->succ_rend());
+ CFGBlock* DefaultBlock = *Src->succ_rbegin();
+
+ bool IsNew;
+
+ ExplodedNodeImpl* Succ = Eng.G->getNodeImpl(BlockEdge(Src, DefaultBlock),
+ St, &IsNew);
+ Succ->addPredecessor(Pred);
+
+ if (IsNew) {
+ if (isSink)
+ Succ->markAsSink();
+ else
+ Eng.WList->Enqueue(Succ);
+
+ return Succ;
+ }
+
+ return NULL;
+}
+
+GREndPathNodeBuilderImpl::~GREndPathNodeBuilderImpl() {
+ // Auto-generate an EOP node if one has not been generated.
+ if (!HasGeneratedNode) generateNodeImpl(Pred->State);
+}
+
+ExplodedNodeImpl*
+GREndPathNodeBuilderImpl::generateNodeImpl(const void* State,
+ const void *tag,
+ ExplodedNodeImpl* P) {
+ HasGeneratedNode = true;
+ bool IsNew;
+
+ ExplodedNodeImpl* Node =
+ Eng.G->getNodeImpl(BlockEntrance(&B, tag), State, &IsNew);
+
+ Node->addPredecessor(P ? P : Pred);
+
+ if (IsNew) {
+ Eng.G->addEndOfPath(Node);
+ return Node;
+ }
+
+ return NULL;
+}
diff --git a/lib/Analysis/GRExprEngine.cpp b/lib/Analysis/GRExprEngine.cpp
new file mode 100644
index 0000000..e8c5be5
--- /dev/null
+++ b/lib/Analysis/GRExprEngine.cpp
@@ -0,0 +1,3426 @@
+//=-- GRExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a meta-engine for path-sensitive dataflow analysis that
+// is built on GRCoreEngine, but provides the boilerplate to execute transfer
+// functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#include <sstream>
+#endif
+
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::cast;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN MappedBatchAuditor : public GRSimpleAPICheck {
+ typedef llvm::ImmutableList<GRSimpleAPICheck*> Checks;
+ typedef llvm::DenseMap<void*,Checks> MapTy;
+
+ MapTy M;
+ Checks::Factory F;
+ Checks AllStmts;
+
+public:
+ MappedBatchAuditor(llvm::BumpPtrAllocator& Alloc) :
+ F(Alloc), AllStmts(F.GetEmptyList()) {}
+
+ virtual ~MappedBatchAuditor() {
+ llvm::DenseSet<GRSimpleAPICheck*> AlreadyVisited;
+
+ for (MapTy::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI)
+ for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E;++I){
+
+ GRSimpleAPICheck* check = *I;
+
+ if (AlreadyVisited.count(check))
+ continue;
+
+ AlreadyVisited.insert(check);
+ delete check;
+ }
+ }
+
+ void AddCheck(GRSimpleAPICheck *A, Stmt::StmtClass C) {
+ assert (A && "Check cannot be null.");
+ void* key = reinterpret_cast<void*>((uintptr_t) C);
+ MapTy::iterator I = M.find(key);
+ M[key] = F.Concat(A, I == M.end() ? F.GetEmptyList() : I->second);
+ }
+
+ void AddCheck(GRSimpleAPICheck *A) {
+ assert (A && "Check cannot be null.");
+ AllStmts = F.Concat(A, AllStmts);
+ }
+
+ virtual bool Audit(NodeTy* N, GRStateManager& VMgr) {
+ // First handle the auditors that accept all statements.
+ bool isSink = false;
+ for (Checks::iterator I = AllStmts.begin(), E = AllStmts.end(); I!=E; ++I)
+ isSink |= (*I)->Audit(N, VMgr);
+
+ // Next handle the auditors that accept only specific statements.
+ Stmt* S = cast<PostStmt>(N->getLocation()).getStmt();
+ void* key = reinterpret_cast<void*>((uintptr_t) S->getStmtClass());
+ MapTy::iterator MI = M.find(key);
+ if (MI != M.end()) {
+ for (Checks::iterator I=MI->second.begin(), E=MI->second.end(); I!=E; ++I)
+ isSink |= (*I)->Audit(N, VMgr);
+ }
+
+ return isSink;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+static inline Selector GetNullarySelector(const char* name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(0, &II);
+}
+
+
+GRExprEngine::GRExprEngine(CFG& cfg, Decl& CD, ASTContext& Ctx,
+ LiveVariables& L, BugReporterData& BRD,
+ bool purgeDead, bool eagerlyAssume,
+ StoreManagerCreator SMC,
+ ConstraintManagerCreator CMC)
+ : CoreEngine(cfg, CD, Ctx, *this),
+ G(CoreEngine.getGraph()),
+ Liveness(L),
+ Builder(NULL),
+ StateMgr(G.getContext(), SMC, CMC, G.getAllocator(), cfg, CD, L),
+ SymMgr(StateMgr.getSymbolManager()),
+ ValMgr(StateMgr.getValueManager()),
+ CurrentStmt(NULL),
+ NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
+ RaiseSel(GetNullarySelector("raise", G.getContext())),
+ PurgeDead(purgeDead),
+ BR(BRD, *this),
+ EagerlyAssume(eagerlyAssume) {}
+
+GRExprEngine::~GRExprEngine() {
+ BR.FlushReports();
+ delete [] NSExceptionInstanceRaiseSelectors;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+
+void GRExprEngine::setTransferFunctions(GRTransferFuncs* tf) {
+ StateMgr.TF = tf;
+ tf->RegisterChecks(getBugReporter());
+ tf->RegisterPrinters(getStateManager().Printers);
+}
+
+void GRExprEngine::AddCheck(GRSimpleAPICheck* A, Stmt::StmtClass C) {
+ if (!BatchAuditor)
+ BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
+
+ ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A, C);
+}
+
+void GRExprEngine::AddCheck(GRSimpleAPICheck *A) {
+ if (!BatchAuditor)
+ BatchAuditor.reset(new MappedBatchAuditor(getGraph().getAllocator()));
+
+ ((MappedBatchAuditor*) BatchAuditor.get())->AddCheck(A);
+}
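+
+// Registration sketch (hypothetical client code; 'MyCheck' and 'MyOtherCheck'
+// stand for GRSimpleAPICheck subclasses not defined in this file):
+//
+//   Eng.AddCheck(new MyCheck(), Stmt::ObjCMessageExprClass); // one stmt class
+//   Eng.AddCheck(new MyOtherCheck());                        // all statements
+//
+// Ownership passes to the MappedBatchAuditor, whose destructor deletes each
+// registered check exactly once.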
+
+const GRState* GRExprEngine::getInitialState() {
+ const GRState *state = StateMgr.getInitialState();
+
+ // Precondition: the first argument of 'main' is an integer guaranteed
+ // to be > 0.
+ // FIXME: It would be nice if we had a more general mechanism to add
+ // such preconditions. Some day.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(&StateMgr.getCodeDecl()))
+ if (strcmp(FD->getIdentifier()->getName(), "main") == 0 &&
+ FD->getNumParams() > 0) {
+ const ParmVarDecl *PD = FD->getParamDecl(0);
+ QualType T = PD->getType();
+ if (T->isIntegerType())
+ if (const MemRegion *R = StateMgr.getRegion(PD)) {
+ SVal V = GetSVal(state, loc::MemRegionVal(R));
+ SVal Constraint = EvalBinOp(state, BinaryOperator::GT, V,
+ ValMgr.makeZeroVal(T),
+ getContext().IntTy);
+ bool isFeasible = false;
+ const GRState *newState = Assume(state, Constraint, true,
+ isFeasible);
+ if (newState) state = newState;
+ }
+ }
+
+ return state;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level transfer function logic (Dispatcher).
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::ProcessStmt(Stmt* S, StmtNodeBuilder& builder) {
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ S->getLocStart(),
+ "Error evaluating statement");
+
+ Builder = &builder;
+ EntryNode = builder.getLastNode();
+
+ // FIXME: Consolidate.
+ CurrentStmt = S;
+ StateMgr.CurrentStmt = S;
+
+ // Set up our simple checks.
+ if (BatchAuditor)
+ Builder->setAuditor(BatchAuditor.get());
+
+ // Create the cleaned state.
+ SymbolReaper SymReaper(Liveness, SymMgr);
+ CleanedState = PurgeDead ? StateMgr.RemoveDeadBindings(EntryNode->getState(),
+ CurrentStmt, SymReaper)
+ : EntryNode->getState();
+
+ // Process any special transfer function for dead symbols.
+ NodeSet Tmp;
+
+ if (!SymReaper.hasDeadSymbols())
+ Tmp.Add(EntryNode);
+ else {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ SaveAndRestore<bool> OldPurgeDeadSymbols(Builder->PurgingDeadSymbols);
+ Builder->PurgingDeadSymbols = true;
+
+ getTF().EvalDeadSymbols(Tmp, *this, *Builder, EntryNode, S,
+ CleanedState, SymReaper);
+
+ if (!Builder->BuildSinks && !Builder->HasGeneratedNode)
+ Tmp.Add(EntryNode);
+ }
+
+ bool HasAutoGenerated = false;
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ NodeSet Dst;
+
+ // Set the cleaned state.
+ Builder->SetCleanedState(*I == EntryNode ? CleanedState : GetState(*I));
+
+ // Visit the statement.
+ Visit(S, *I, Dst);
+
+ // Do we need to auto-generate a node? We only need to do this to generate
+ // a node with a "cleaned" state; GRCoreEngine will actually handle
+ // auto-transitions for other cases.
+ if (Dst.size() == 1 && *Dst.begin() == EntryNode
+ && !Builder->HasGeneratedNode && !HasAutoGenerated) {
+ HasAutoGenerated = true;
+ builder.generateNode(S, GetState(EntryNode), *I);
+ }
+ }
+
+ // NULL out these variables to cleanup.
+ CleanedState = NULL;
+ EntryNode = NULL;
+
+ // FIXME: Consolidate.
+ StateMgr.CurrentStmt = 0;
+ CurrentStmt = 0;
+
+ Builder = NULL;
+}
+
+void GRExprEngine::Visit(Stmt* S, NodeTy* Pred, NodeSet& Dst) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ S->getLocStart(),
+ "Error evaluating statement");
+
+ // FIXME: add metadata to the CFG so that we can disable
+ // this check when we KNOW that there is no block-level subexpression.
+ // The motivation is that this check requires a hashtable lookup.
+
+ if (S != CurrentStmt && getCFG().isBlkExpr(S)) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ switch (S->getStmtClass()) {
+
+ default:
+ // Cases we intentionally have "default" handle:
+ // AddrLabelExpr, IntegerLiteral, CharacterLiteral
+
+ Dst.Add(Pred); // No-op. Simply propagate the current state unchanged.
+ break;
+
+ case Stmt::ArraySubscriptExprClass:
+ VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::AsmStmtClass:
+ VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator* B = cast<BinaryOperator>(S);
+
+ if (B->isLogicalOp()) {
+ VisitLogicalExpr(B, Pred, Dst);
+ break;
+ }
+ else if (B->getOpcode() == BinaryOperator::Comma) {
+ const GRState* state = GetState(Pred);
+ MakeNode(Dst, B, Pred, BindExpr(state, B, GetSVal(state, B->getRHS())));
+ break;
+ }
+
+ if (EagerlyAssume && (B->isRelationalOp() || B->isEqualityOp())) {
+ NodeSet Tmp;
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
+ EvalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ }
+ else
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+
+ break;
+ }
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass: {
+ CallExpr* C = cast<CallExpr>(S);
+ VisitCall(C, Pred, C->arg_begin(), C->arg_end(), Dst);
+ break;
+ }
+
+ // FIXME: ChooseExpr is really a constant. We need to fix
+    // FIXME: ChooseExpr is really a constant.  We need to fix the CFG so that
+    // it does not model ChooseExpr as explicit control-flow.
+ case Stmt::ChooseExprClass: { // __builtin_choose_expr
+ ChooseExpr* C = cast<ChooseExpr>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ break;
+ }
+
+ case Stmt::CompoundAssignOperatorClass:
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+ break;
+
+ case Stmt::CompoundLiteralExprClass:
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ConditionalOperatorClass: { // '?' operator
+ ConditionalOperator* C = cast<ConditionalOperator>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ break;
+ }
+
+ case Stmt::DeclRefExprClass:
+ case Stmt::QualifiedDeclRefExprClass:
+ VisitDeclRefExpr(cast<DeclRefExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::DeclStmtClass:
+ VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass: {
+ CastExpr* C = cast<CastExpr>(S);
+ VisitCast(C, C->getSubExpr(), Pred, Dst);
+ break;
+ }
+
+ case Stmt::InitListExprClass:
+ VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::MemberExprClass:
+ VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ObjCIvarRefExprClass:
+ VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst, false);
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::ObjCMessageExprClass: {
+ VisitObjCMessageExpr(cast<ObjCMessageExpr>(S), Pred, Dst);
+ break;
+ }
+
+ case Stmt::ObjCAtThrowStmtClass: {
+ // FIXME: This is not complete. We basically treat @throw as
+ // an abort.
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, S, Pred, GetState(Pred));
+ break;
+ }
+
+ case Stmt::ParenExprClass:
+ Visit(cast<ParenExpr>(S)->getSubExpr()->IgnoreParens(), Pred, Dst);
+ break;
+
+ case Stmt::ReturnStmtClass:
+ VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+ break;
+
+ case Stmt::SizeOfAlignOfExprClass:
+ VisitSizeOfAlignOfExpr(cast<SizeOfAlignOfExpr>(S), Pred, Dst);
+ break;
+
+ case Stmt::StmtExprClass: {
+ StmtExpr* SE = cast<StmtExpr>(S);
+
+ if (SE->getSubStmt()->body_empty()) {
+ // Empty statement expression.
+ assert(SE->getType() == getContext().VoidTy
+ && "Empty statement expression must have void type.");
+ Dst.Add(Pred);
+ break;
+ }
+
+ if (Expr* LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+ const GRState* state = GetState(Pred);
+ MakeNode(Dst, SE, Pred, BindExpr(state, SE, GetSVal(state, LastExpr)));
+ }
+ else
+ Dst.Add(Pred);
+
+ break;
+ }
+
+ case Stmt::StringLiteralClass:
+ VisitLValue(cast<StringLiteral>(S), Pred, Dst);
+ break;
+
+ case Stmt::UnaryOperatorClass: {
+ UnaryOperator *U = cast<UnaryOperator>(S);
+ if (EagerlyAssume && (U->getOpcode() == UnaryOperator::LNot)) {
+ NodeSet Tmp;
+ VisitUnaryOperator(U, Pred, Tmp, false);
+ EvalEagerlyAssume(Dst, Tmp, U);
+ }
+ else
+ VisitUnaryOperator(U, Pred, Dst, false);
+ break;
+ }
+ }
+}
+
+void GRExprEngine::VisitLValue(Expr* Ex, NodeTy* Pred, NodeSet& Dst) {
+
+ Ex = Ex->IgnoreParens();
+
+ if (Ex != CurrentStmt && getCFG().isBlkExpr(Ex)) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ switch (Ex->getStmtClass()) {
+
+ case Stmt::ArraySubscriptExprClass:
+ VisitArraySubscriptExpr(cast<ArraySubscriptExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::DeclRefExprClass:
+ case Stmt::QualifiedDeclRefExprClass:
+ VisitDeclRefExpr(cast<DeclRefExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::ObjCIvarRefExprClass:
+ VisitObjCIvarRefExpr(cast<ObjCIvarRefExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::UnaryOperatorClass:
+ VisitUnaryOperator(cast<UnaryOperator>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::MemberExprClass:
+ VisitMemberExpr(cast<MemberExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::CompoundLiteralExprClass:
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(Ex), Pred, Dst, true);
+ return;
+
+ case Stmt::ObjCPropertyRefExprClass:
+ case Stmt::ObjCKVCRefExprClass:
+ // FIXME: Property assignments are lvalues, but not really "locations".
+ // e.g.: self.x = something;
+ // Here the "self.x" really can translate to a method call (setter) when
+ // the assignment is made. Moreover, the entire assignment expression
+    // evaluates to whatever "something" is, not calling the "getter" for
+ // the property (which would make sense since it can have side effects).
+ // We'll probably treat this as a location, but not one that we can
+ // take the address of. Perhaps we need a new SVal class for cases
+    // like this?
+ // Note that we have a similar problem for bitfields, since they don't
+ // have "locations" in the sense that we can take their address.
+ Dst.Add(Pred);
+ return;
+
+ case Stmt::StringLiteralClass: {
+ const GRState* state = GetState(Pred);
+ SVal V = StateMgr.GetLValue(state, cast<StringLiteral>(Ex));
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, V));
+ return;
+ }
+
+ default:
+      // Arbitrary subexpressions can return aggregate temporaries that
+      // can be used in an lvalue context.  We need to enhance our support
+ // of such temporaries in both the environment and the store, so right
+ // now we just do a regular visit.
+ assert ((Ex->getType()->isAggregateType()) &&
+ "Other kinds of expressions with non-aggregate/union types do"
+ " not have lvalues.");
+
+ Visit(Ex, Pred, Dst);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Block entrance. (Update counters).
+//===----------------------------------------------------------------------===//
+
+bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const GRState*,
+ GRBlockCounter BC) {
+
+ return BC.getNumVisited(B->getBlockID()) < 3;
+}
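+
+// In other words, each basic block is expanded at most three times along any
+// single path, which acts as a crude loop bound for the path simulation.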
+
+//===----------------------------------------------------------------------===//
+// Generic node creation.
+//===----------------------------------------------------------------------===//
+
+GRExprEngine::NodeTy* GRExprEngine::MakeNode(NodeSet& Dst, Stmt* S,
+ NodeTy* Pred,
+ const GRState* St,
+ ProgramPoint::Kind K,
+ const void *tag) {
+
+ assert (Builder && "GRStmtNodeBuilder not present.");
+ SaveAndRestore<const void*> OldTag(Builder->Tag);
+ Builder->Tag = tag;
+ return Builder->MakeNode(Dst, S, Pred, St, K);
+}
+
+//===----------------------------------------------------------------------===//
+// Branch processing.
+//===----------------------------------------------------------------------===//
+
+const GRState* GRExprEngine::MarkBranch(const GRState* state,
+ Stmt* Terminator,
+ bool branchTaken) {
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ return state;
+
+ case Stmt::BinaryOperatorClass: { // '&&' and '||'
+
+ BinaryOperator* B = cast<BinaryOperator>(Terminator);
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ assert (Op == BinaryOperator::LAnd || Op == BinaryOperator::LOr);
+
+ // For &&, if we take the true branch, then the value of the whole
+ // expression is that of the RHS expression.
+ //
+ // For ||, if we take the false branch, then the value of the whole
+ // expression is that of the RHS expression.
+
+ Expr* Ex = (Op == BinaryOperator::LAnd && branchTaken) ||
+ (Op == BinaryOperator::LOr && !branchTaken)
+ ? B->getRHS() : B->getLHS();
+
+ return BindBlkExpr(state, B, UndefinedVal(Ex));
+ }
+
+ case Stmt::ConditionalOperatorClass: { // ?:
+
+ ConditionalOperator* C = cast<ConditionalOperator>(Terminator);
+
+ // For ?, if branchTaken == true then the value is either the LHS or
+ // the condition itself. (GNU extension).
+
+ Expr* Ex;
+
+ if (branchTaken)
+ Ex = C->getLHS() ? C->getLHS() : C->getCond();
+ else
+ Ex = C->getRHS();
+
+ return BindBlkExpr(state, C, UndefinedVal(Ex));
+ }
+
+    case Stmt::ChooseExprClass: { // __builtin_choose_expr
+
+ ChooseExpr* C = cast<ChooseExpr>(Terminator);
+
+ Expr* Ex = branchTaken ? C->getLHS() : C->getRHS();
+ return BindBlkExpr(state, C, UndefinedVal(Ex));
+ }
+ }
+}
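+
+// Illustrative trace (hypothetical input 'p && q'):
+//
+//   // Taking the true branch at 'p':
+//   state = MarkBranch(state, B /* p && q */, /*branchTaken=*/true);
+//   // The block-expr 'p && q' is now bound to UndefinedVal(q): the RHS will
+//   // supply the expression's value.  VisitLogicalExpr (below) recovers the
+//   // marked subexpression via cast<UndefinedVal>(X).getData().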
+
+/// RecoverCastedSymbol - A helper function for ProcessBranch that is used
+/// to try to recover some path-sensitivity for casts of symbolic
+/// integers that promote their values (which are currently not tracked well).
+/// This function returns the SVal bound to Condition->IgnoreCasts if all the
+/// cast(s) did was sign-extend the original value.
+static SVal RecoverCastedSymbol(GRStateManager& StateMgr, const GRState* state,
+ Stmt* Condition, ASTContext& Ctx) {
+
+ Expr *Ex = dyn_cast<Expr>(Condition);
+ if (!Ex)
+ return UnknownVal();
+
+ uint64_t bits = 0;
+ bool bitsInit = false;
+
+ while (CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ QualType T = CE->getType();
+
+ if (!T->isIntegerType())
+ return UnknownVal();
+
+ uint64_t newBits = Ctx.getTypeSize(T);
+ if (!bitsInit || newBits < bits) {
+ bitsInit = true;
+ bits = newBits;
+ }
+
+ Ex = CE->getSubExpr();
+ }
+
+ // We reached a non-cast. Is it a symbolic value?
+ QualType T = Ex->getType();
+
+ if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
+ return UnknownVal();
+
+ return StateMgr.GetSVal(state, Ex);
+}
+
+void GRExprEngine::ProcessBranch(Stmt* Condition, Stmt* Term,
+ BranchNodeBuilder& builder) {
+
+ // Remove old bindings for subexpressions.
+ const GRState* PrevState =
+ StateMgr.RemoveSubExprBindings(builder.getState());
+
+ // Check for NULL conditions; e.g. "for(;;)"
+ if (!Condition) {
+ builder.markInfeasible(false);
+ return;
+ }
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Condition->getLocStart(),
+ "Error evaluating branch");
+
+ SVal V = GetSVal(PrevState, Condition);
+
+ switch (V.getBaseKind()) {
+ default:
+ break;
+
+ case SVal::UnknownKind: {
+ if (Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegerType()) {
+ // Try to recover some path-sensitivity. Right now casts of symbolic
+ // integers that promote their values are currently not tracked well.
+ // If 'Condition' is such an expression, try and recover the
+ // underlying value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ builder.getState(), Condition,
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ V = recovered;
+ break;
+ }
+ }
+ }
+
+ builder.generateNode(MarkBranch(PrevState, Term, true), true);
+ builder.generateNode(MarkBranch(PrevState, Term, false), false);
+ return;
+ }
+
+ case SVal::UndefinedKind: {
+ NodeTy* N = builder.generateNode(PrevState, true);
+
+ if (N) {
+ N->markAsSink();
+ UndefBranches.insert(N);
+ }
+
+ builder.markInfeasible(false);
+ return;
+ }
+ }
+
+ // Process the true branch.
+
+ bool isFeasible = false;
+ const GRState* state = Assume(PrevState, V, true, isFeasible);
+
+ if (isFeasible)
+ builder.generateNode(MarkBranch(state, Term, true), true);
+ else
+ builder.markInfeasible(true);
+
+ // Process the false branch.
+
+ isFeasible = false;
+ state = Assume(PrevState, V, false, isFeasible);
+
+ if (isFeasible)
+ builder.generateNode(MarkBranch(state, Term, false), false);
+ else
+ builder.markInfeasible(false);
+}
+
+/// ProcessIndirectGoto - Called by GRCoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a computed goto jump.
+void GRExprEngine::ProcessIndirectGoto(IndirectGotoNodeBuilder& builder) {
+
+ const GRState* state = builder.getState();
+ SVal V = GetSVal(state, builder.getTarget());
+
+ // Three possibilities:
+ //
+ // (1) We know the computed label.
+ // (2) The label is NULL (or some other constant), or Undefined.
+ // (3) We have no clue about the label. Dispatch to all targets.
+ //
+
+ typedef IndirectGotoNodeBuilder::iterator iterator;
+
+ if (isa<loc::GotoLabel>(V)) {
+ LabelStmt* L = cast<loc::GotoLabel>(V).getLabel();
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I) {
+ if (I.getLabel() == L) {
+ builder.generateNode(I, state);
+ return;
+ }
+ }
+
+ assert (false && "No block with label.");
+ return;
+ }
+
+ if (isa<loc::ConcreteInt>(V) || isa<UndefinedVal>(V)) {
+ // Dispatch to the first target and mark it as a sink.
+ NodeTy* N = builder.generateNode(builder.begin(), state, true);
+ UndefBranches.insert(N);
+ return;
+ }
+
+ // This is really a catch-all. We don't support symbolics yet.
+ // FIXME: Implement dispatch for symbolic pointers.
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+ builder.generateNode(I, state);
+}
+
+
+void GRExprEngine::VisitGuardedExpr(Expr* Ex, Expr* L, Expr* R,
+ NodeTy* Pred, NodeSet& Dst) {
+
+ assert (Ex == CurrentStmt && getCFG().isBlkExpr(Ex));
+
+ const GRState* state = GetState(Pred);
+ SVal X = GetBlkExprSVal(state, Ex);
+
+ assert (X.isUndef());
+
+ Expr* SE = (Expr*) cast<UndefinedVal>(X).getData();
+
+ assert (SE);
+
+ X = GetBlkExprSVal(state, SE);
+
+ // Make sure that we invalidate the previous binding.
+ MakeNode(Dst, Ex, Pred, StateMgr.BindExpr(state, Ex, X, true, true));
+}
+
+/// ProcessSwitch - Called by GRCoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a switch statement.
+void GRExprEngine::ProcessSwitch(SwitchNodeBuilder& builder) {
+ typedef SwitchNodeBuilder::iterator iterator;
+ const GRState* state = builder.getState();
+ Expr* CondE = builder.getCondition();
+ SVal CondV = GetSVal(state, CondE);
+
+ if (CondV.isUndef()) {
+ NodeTy* N = builder.generateDefaultCaseNode(state, true);
+ UndefBranches.insert(N);
+ return;
+ }
+
+ const GRState* DefaultSt = state;
+ bool DefaultFeasible = false;
+
+ for (iterator I = builder.begin(), EI = builder.end(); I != EI; ++I) {
+ CaseStmt* Case = cast<CaseStmt>(I.getCase());
+
+ // Evaluate the LHS of the case value.
+ Expr::EvalResult V1;
+ bool b = Case->getLHS()->Evaluate(V1, getContext());
+
+ // Sanity checks. These go away in Release builds.
+ assert(b && V1.Val.isInt() && !V1.HasSideEffects
+ && "Case condition must evaluate to an integer constant.");
+ b = b; // silence unused variable warning
+ assert(V1.Val.getInt().getBitWidth() ==
+ getContext().getTypeSize(CondE->getType()));
+
+ // Get the RHS of the case, if it exists.
+ Expr::EvalResult V2;
+
+ if (Expr* E = Case->getRHS()) {
+ b = E->Evaluate(V2, getContext());
+ assert(b && V2.Val.isInt() && !V2.HasSideEffects
+ && "Case condition must evaluate to an integer constant.");
+ b = b; // silence unused variable warning
+ }
+ else
+ V2 = V1;
+
+ // FIXME: Eventually we should replace the logic below with a range
+ // comparison, rather than concretize the values within the range.
+ // This should be easy once we have "ranges" for NonLVals.
+
+ do {
+ nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1.Val.getInt()));
+ SVal Res = EvalBinOp(DefaultSt, BinaryOperator::EQ, CondV, CaseVal,
+ getContext().IntTy);
+
+ // Now "assume" that the case matches.
+ bool isFeasible = false;
+ const GRState* StNew = Assume(state, Res, true, isFeasible);
+
+ if (isFeasible) {
+ builder.generateCaseStmtNode(I, StNew);
+
+ // If CondV evaluates to a constant, then we know that this
+ // is the *only* case that we can take, so stop evaluating the
+ // others.
+ if (isa<nonloc::ConcreteInt>(CondV))
+ return;
+ }
+
+ // Now "assume" that the case doesn't match. Add this state
+ // to the default state (if it is feasible).
+
+ isFeasible = false;
+ StNew = Assume(DefaultSt, Res, false, isFeasible);
+
+ if (isFeasible) {
+ DefaultFeasible = true;
+ DefaultSt = StNew;
+ }
+
+ // Concretize the next value in the range.
+ if (V1.Val.getInt() == V2.Val.getInt())
+ break;
+
+ ++V1.Val.getInt();
+ assert (V1.Val.getInt() <= V2.Val.getInt());
+
+ } while (true);
+ }
+
+  // If we reach here, then we know that the default branch is possible.
+ if (DefaultFeasible) builder.generateDefaultCaseNode(DefaultSt);
+}
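+
+// Worked example (illustrative): for the GNU case range 'case 3 ... 5:' the
+// loop above concretizes CondV == 3, CondV == 4, and CondV == 5 in turn,
+// generating a case node for each feasible match and folding the
+// corresponding negative assumption into DefaultSt before moving on to the
+// next value in the range.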
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: logical operations ('&&', '||').
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitLogicalExpr(BinaryOperator* B, NodeTy* Pred,
+ NodeSet& Dst) {
+
+ assert (B->getOpcode() == BinaryOperator::LAnd ||
+ B->getOpcode() == BinaryOperator::LOr);
+
+ assert (B == CurrentStmt && getCFG().isBlkExpr(B));
+
+ const GRState* state = GetState(Pred);
+ SVal X = GetBlkExprSVal(state, B);
+
+ assert (X.isUndef());
+
+ Expr* Ex = (Expr*) cast<UndefinedVal>(X).getData();
+
+ assert (Ex);
+
+ if (Ex == B->getRHS()) {
+
+ X = GetBlkExprSVal(state, Ex);
+
+ // Handle undefined values.
+
+ if (X.isUndef()) {
+ MakeNode(Dst, B, Pred, BindBlkExpr(state, B, X));
+ return;
+ }
+
+ // We took the RHS. Because the value of the '&&' or '||' expression must
+ // evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
+ // or 1. Alternatively, we could take a lazy approach, and calculate this
+ // value later when necessary. We don't have the machinery in place for
+ // this right now, and since most logical expressions are used for branches,
+ // the payoff is not likely to be large. Instead, we do eager evaluation.
+
+ bool isFeasible = false;
+ const GRState* NewState = Assume(state, X, true, isFeasible);
+
+ if (isFeasible)
+ MakeNode(Dst, B, Pred,
+ BindBlkExpr(NewState, B, MakeConstantVal(1U, B)));
+
+ isFeasible = false;
+ NewState = Assume(state, X, false, isFeasible);
+
+ if (isFeasible)
+ MakeNode(Dst, B, Pred,
+ BindBlkExpr(NewState, B, MakeConstantVal(0U, B)));
+ }
+ else {
+ // We took the LHS expression. Depending on whether we are '&&' or
+ // '||' we know what the value of the expression is via properties of
+ // the short-circuiting.
+
+ X = MakeConstantVal( B->getOpcode() == BinaryOperator::LAnd ? 0U : 1U, B);
+ MakeNode(Dst, B, Pred, BindBlkExpr(state, B, X));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Loads and stores.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitDeclRefExpr(DeclRefExpr* Ex, NodeTy* Pred, NodeSet& Dst,
+ bool asLValue) {
+
+ const GRState* state = GetState(Pred);
+
+ const NamedDecl* D = Ex->getDecl();
+
+ if (const VarDecl* VD = dyn_cast<VarDecl>(D)) {
+
+ SVal V = StateMgr.GetLValue(state, VD);
+
+ if (asLValue)
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, V),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, Ex, Pred, state, V);
+ return;
+
+ } else if (const EnumConstantDecl* ED = dyn_cast<EnumConstantDecl>(D)) {
+ assert(!asLValue && "EnumConstantDecl does not have lvalue.");
+
+ BasicValueFactory& BasicVals = StateMgr.getBasicVals();
+ SVal V = nonloc::ConcreteInt(BasicVals.getValue(ED->getInitVal()));
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, V));
+ return;
+
+ } else if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D)) {
+ assert(asLValue);
+ SVal V = ValMgr.getFunctionPointer(FD);
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, V),
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+
+ assert (false &&
+ "ValueDecl support for this ValueDecl not implemented.");
+}
+
+/// VisitArraySubscriptExpr - Transfer function for array accesses
+void GRExprEngine::VisitArraySubscriptExpr(ArraySubscriptExpr* A, NodeTy* Pred,
+ NodeSet& Dst, bool asLValue) {
+
+ Expr* Base = A->getBase()->IgnoreParens();
+ Expr* Idx = A->getIdx()->IgnoreParens();
+ NodeSet Tmp;
+
+ if (Base->getType()->isVectorType()) {
+ // For vector types get its lvalue.
+ // FIXME: This may not be correct. Is the rvalue of a vector its location?
+ // In fact, I think this is just a hack. We need to get the right
+ // semantics.
+ VisitLValue(Base, Pred, Tmp);
+ }
+ else
+ Visit(Base, Pred, Tmp); // Get Base's rvalue, which should be an LocVal.
+
+ for (NodeSet::iterator I1=Tmp.begin(), E1=Tmp.end(); I1!=E1; ++I1) {
+ NodeSet Tmp2;
+ Visit(Idx, *I1, Tmp2); // Evaluate the index.
+
+ for (NodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end(); I2!=E2; ++I2) {
+ const GRState* state = GetState(*I2);
+ SVal V = StateMgr.GetLValue(state, A->getType(),
+ GetSVal(state, Base),
+ GetSVal(state, Idx));
+
+ if (asLValue)
+ MakeNode(Dst, A, *I2, BindExpr(state, A, V),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, A, *I2, state, V);
+ }
+ }
+}
+
+/// VisitMemberExpr - Transfer function for member expressions.
+void GRExprEngine::VisitMemberExpr(MemberExpr* M, NodeTy* Pred,
+ NodeSet& Dst, bool asLValue) {
+
+ Expr* Base = M->getBase()->IgnoreParens();
+ NodeSet Tmp;
+
+ if (M->isArrow())
+ Visit(Base, Pred, Tmp); // p->f = ... or ... = p->f
+ else
+ VisitLValue(Base, Pred, Tmp); // x.f = ... or ... = x.f
+
+ FieldDecl *Field = dyn_cast<FieldDecl>(M->getMemberDecl());
+ if (!Field) // FIXME: skipping member expressions for non-fields
+ return;
+
+ for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ const GRState* state = GetState(*I);
+    // FIXME: Should we insert some assumption logic in here to determine
+    // if "Base" is a valid piece of memory?  We used to add this assumption
+    // when using FieldOffset lvals (which we no longer have).
+ SVal L = StateMgr.GetLValue(state, GetSVal(state, Base), Field);
+
+ if (asLValue)
+ MakeNode(Dst, M, *I, BindExpr(state, M, L),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, M, *I, state, L);
+ }
+}
+
+/// EvalBind - Handle the semantics of binding a value to a specific location.
+/// This method is used by EvalStore and (soon) VisitDeclStmt, and others.
+void GRExprEngine::EvalBind(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
+ const GRState* state, SVal location, SVal Val) {
+
+ const GRState* newState = 0;
+
+ if (location.isUnknown()) {
+ // We know that the new state will be the same as the old state since
+ // the location of the binding is "unknown". Consequently, there
+ // is no reason to just create a new node.
+ newState = state;
+ }
+ else {
+ // We are binding to a value other than 'unknown'. Perform the binding
+ // using the StoreManager.
+ newState = StateMgr.BindLoc(state, cast<Loc>(location), Val);
+ }
+
+ // The next thing to do is check if the GRTransferFuncs object wants to
+ // update the state based on the new binding. If the GRTransferFunc object
+ // doesn't do anything, just auto-propagate the current state.
+ GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, Pred, newState, Ex,
+ newState != state);
+
+ getTF().EvalBind(BuilderRef, location, Val);
+}
+
+/// EvalStore - Handle the semantics of a store via an assignment.
+/// @param Dst The node set to store generated state nodes
+/// @param Ex The expression representing the location of the store
+/// @param state The current simulation state
+/// @param location The location to store the value
+/// @param Val The value to be stored
+void GRExprEngine::EvalStore(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
+ const GRState* state, SVal location, SVal Val,
+ const void *tag) {
+
+ assert (Builder && "GRStmtNodeBuilder must be defined.");
+
+ // Evaluate the location (checks for bad dereferences).
+ Pred = EvalLocation(Ex, Pred, state, location, tag);
+
+ if (!Pred)
+ return;
+
+ assert (!location.isUndef());
+ state = GetState(Pred);
+
+ // Proceed with the store.
+ SaveAndRestore<ProgramPoint::Kind> OldSPointKind(Builder->PointKind);
+ SaveAndRestore<const void*> OldTag(Builder->Tag);
+ Builder->PointKind = ProgramPoint::PostStoreKind;
+ Builder->Tag = tag;
+ EvalBind(Dst, Ex, Pred, state, location, Val);
+}
+
+void GRExprEngine::EvalLoad(NodeSet& Dst, Expr* Ex, NodeTy* Pred,
+ const GRState* state, SVal location,
+ const void *tag) {
+
+ // Evaluate the location (checks for bad dereferences).
+ Pred = EvalLocation(Ex, Pred, state, location, tag);
+
+ if (!Pred)
+ return;
+
+ state = GetState(Pred);
+
+ // Proceed with the load.
+ ProgramPoint::Kind K = ProgramPoint::PostLoadKind;
+
+ // FIXME: Currently symbolic analysis "generates" new symbols
+ // for the contents of values. We need a better approach.
+
+ if (location.isUnknown()) {
+ // This is important. We must nuke the old binding.
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, UnknownVal()), K, tag);
+ }
+ else {
+ SVal V = GetSVal(state, cast<Loc>(location), Ex->getType());
+ MakeNode(Dst, Ex, Pred, BindExpr(state, Ex, V), K, tag);
+ }
+}
+
+void GRExprEngine::EvalStore(NodeSet& Dst, Expr* Ex, Expr* StoreE, NodeTy* Pred,
+ const GRState* state, SVal location, SVal Val,
+ const void *tag) {
+
+ NodeSet TmpDst;
+ EvalStore(TmpDst, StoreE, Pred, state, location, Val, tag);
+
+ for (NodeSet::iterator I=TmpDst.begin(), E=TmpDst.end(); I!=E; ++I)
+ MakeNode(Dst, Ex, *I, (*I)->getState(), ProgramPoint::PostStmtKind, tag);
+}
+
+GRExprEngine::NodeTy* GRExprEngine::EvalLocation(Stmt* Ex, NodeTy* Pred,
+ const GRState* state,
+ SVal location,
+ const void *tag) {
+
+ SaveAndRestore<const void*> OldTag(Builder->Tag);
+ Builder->Tag = tag;
+
+ // Check for loads/stores from/to undefined values.
+ if (location.isUndef()) {
+ NodeTy* N =
+ Builder->generateNode(Ex, state, Pred,
+ ProgramPoint::PostUndefLocationCheckFailedKind);
+
+ if (N) {
+ N->markAsSink();
+ UndefDeref.insert(N);
+ }
+
+ return 0;
+ }
+
+ // Check for loads/stores from/to unknown locations. Treat as No-Ops.
+ if (location.isUnknown())
+ return Pred;
+
+ // During a load, one of two possible situations arises:
+ // (1) A crash, because the location (pointer) was NULL.
+ // (2) The location (pointer) is not NULL, and the dereference works.
+ //
+ // We add these assumptions.
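+ //
+ // As a small illustration, the kind of (hypothetical) analyzed code this
+ // distinguishes:
+ //
+ //   void f(int *p) { *p = 1; }          // implicit: 'p' *may* be NULL
+ //   void g() { int *q = 0; *q = 1; }    // explicit: 'q' is known NULL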
+
+ Loc LV = cast<Loc>(location);
+
+ // "Assume" that the pointer is not NULL.
+ bool isFeasibleNotNull = false;
+ const GRState* StNotNull = Assume(state, LV, true, isFeasibleNotNull);
+
+ // "Assume" that the pointer is NULL.
+ bool isFeasibleNull = false;
+ GRStateRef StNull = GRStateRef(Assume(state, LV, false, isFeasibleNull),
+ getStateManager());
+
+ if (isFeasibleNull) {
+
+ // Use the Generic Data Map to mark in the state what lval was null.
+ const SVal* PersistentLV = getBasicVals().getPersistentSVal(LV);
+ StNull = StNull.set<GRState::NullDerefTag>(PersistentLV);
+
+ // We don't use "MakeNode" here because the node will be a sink
+ // and we have no intention of processing it later.
+ NodeTy* NullNode =
+ Builder->generateNode(Ex, StNull, Pred,
+ ProgramPoint::PostNullCheckFailedKind);
+
+ if (NullNode) {
+
+ NullNode->markAsSink();
+
+ if (isFeasibleNotNull) ImplicitNullDeref.insert(NullNode);
+ else ExplicitNullDeref.insert(NullNode);
+ }
+ }
+
+ if (!isFeasibleNotNull)
+ return 0;
+
+ // Check for out-of-bound array access.
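+ // e.g. (hypothetical analyzed code):  int buf[4]; buf[5] = 1;
+ // Here the index (5) falls outside the extent (4 elements), so only the
+ // out-of-bound assumption is feasible and the access is flagged.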
+ if (isa<loc::MemRegionVal>(LV)) {
+ const MemRegion* R = cast<loc::MemRegionVal>(LV).getRegion();
+ if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+ // Get the index of the accessed element.
+ SVal Idx = ER->getIndex();
+ // Get the extent of the array.
+ SVal NumElements = getStoreManager().getSizeInElements(StNotNull,
+ ER->getSuperRegion());
+
+ bool isFeasibleInBound = false;
+ const GRState* StInBound = AssumeInBound(StNotNull, Idx, NumElements,
+ true, isFeasibleInBound);
+
+ bool isFeasibleOutBound = false;
+ const GRState* StOutBound = AssumeInBound(StNotNull, Idx, NumElements,
+ false, isFeasibleOutBound);
+
+ if (isFeasibleOutBound) {
+ // Report warning. Make sink node manually.
+ NodeTy* OOBNode =
+ Builder->generateNode(Ex, StOutBound, Pred,
+ ProgramPoint::PostOutOfBoundsCheckFailedKind);
+
+ if (OOBNode) {
+ OOBNode->markAsSink();
+
+ if (isFeasibleInBound)
+ ImplicitOOBMemAccesses.insert(OOBNode);
+ else
+ ExplicitOOBMemAccesses.insert(OOBNode);
+ }
+ }
+
+ if (!isFeasibleInBound)
+ return 0;
+
+ StNotNull = StInBound;
+ }
+ }
+
+ // Generate a new node indicating the checks succeed.
+ return Builder->generateNode(Ex, StNotNull, Pred,
+ ProgramPoint::PostLocationChecksSucceedKind);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: OSAtomics.
+//
+// FIXME: Eventually refactor into a more "plugin" infrastructure.
+//===----------------------------------------------------------------------===//
+
+// Mac OS X:
+// http://developer.apple.com/documentation/Darwin/Reference/Manpages/man3
+// atomic.3.html
+//
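+// As a rough sketch (not the SDK declaration), the primitive being modeled
+// here behaves like:
+//
+//   bool OSAtomicCompareAndSwap32(int32_t oldValue, int32_t newValue,
+//       volatile int32_t *theValue) {
+//     if (*theValue == oldValue) { *theValue = newValue; return true; }
+//     return false;
+//   }
+//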
+static bool EvalOSAtomicCompareAndSwap(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred) {
+
+ // Not enough arguments to match OSAtomicCompareAndSwap?
+ if (CE->getNumArgs() != 3)
+ return false;
+
+ ASTContext &C = Engine.getContext();
+ Expr *oldValueExpr = CE->getArg(0);
+ QualType oldValueType = C.getCanonicalType(oldValueExpr->getType());
+
+ Expr *newValueExpr = CE->getArg(1);
+ QualType newValueType = C.getCanonicalType(newValueExpr->getType());
+
+ // Do the types of 'oldValue' and 'newValue' match?
+ if (oldValueType != newValueType)
+ return false;
+
+ Expr *theValueExpr = CE->getArg(2);
+ const PointerType *theValueType = theValueExpr->getType()->getAsPointerType();
+
+ // theValueType not a pointer?
+ if (!theValueType)
+ return false;
+
+ QualType theValueTypePointee =
+ C.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
+
+ // The pointee must match newValueType and oldValueType.
+ if (theValueTypePointee != newValueType)
+ return false;
+
+ static unsigned magic_load = 0;
+ static unsigned magic_store = 0;
+
+ const void *OSAtomicLoadTag = &magic_load;
+ const void *OSAtomicStoreTag = &magic_store;
+
+ // Load 'theValue'.
+ GRStateManager &StateMgr = Engine.getStateManager();
+ const GRState *state = Pred->getState();
+ ExplodedNodeSet<GRState> Tmp;
+ SVal location = StateMgr.GetSVal(state, theValueExpr);
+ Engine.EvalLoad(Tmp, theValueExpr, Pred, state, location, OSAtomicLoadTag);
+
+ for (ExplodedNodeSet<GRState>::iterator I = Tmp.begin(), E = Tmp.end();
+ I != E; ++I) {
+
+ ExplodedNode<GRState> *N = *I;
+ const GRState *stateLoad = N->getState();
+ SVal theValueVal = StateMgr.GetSVal(stateLoad, theValueExpr);
+ SVal oldValueVal = StateMgr.GetSVal(stateLoad, oldValueExpr);
+
+ // Perform the comparison.
+ SVal Cmp = Engine.EvalBinOp(stateLoad,
+ BinaryOperator::EQ, theValueVal, oldValueVal,
+ Engine.getContext().IntTy);
+ bool isFeasible = false;
+ const GRState *stateEqual = StateMgr.Assume(stateLoad, Cmp, true,
+ isFeasible);
+
+ // Were they equal?
+ if (isFeasible) {
+ // Perform the store.
+ ExplodedNodeSet<GRState> TmpStore;
+ Engine.EvalStore(TmpStore, theValueExpr, N, stateEqual, location,
+ StateMgr.GetSVal(stateEqual, newValueExpr),
+ OSAtomicStoreTag);
+
+ // Now bind the result of the comparison.
+ for (ExplodedNodeSet<GRState>::iterator I2 = TmpStore.begin(),
+ E2 = TmpStore.end(); I2 != E2; ++I2) {
+ ExplodedNode<GRState> *predNew = *I2;
+ const GRState *stateNew = predNew->getState();
+ SVal Res = Engine.getValueManager().makeTruthVal(true, CE->getType());
+ Engine.MakeNode(Dst, CE, predNew, Engine.BindExpr(stateNew, CE, Res));
+ }
+ }
+
+ // Were they not equal?
+ isFeasible = false;
+ const GRState *stateNotEqual = StateMgr.Assume(stateLoad, Cmp, false,
+ isFeasible);
+
+ if (isFeasible) {
+ SVal Res = Engine.getValueManager().makeTruthVal(false, CE->getType());
+ Engine.MakeNode(Dst, CE, N, Engine.BindExpr(stateNotEqual, CE, Res));
+ }
+ }
+
+ return true;
+}
+
+static bool EvalOSAtomic(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred) {
+ const FunctionDecl* FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ const char *FName = FD->getNameAsCString();
+
+ // Check for compare and swap.
+ if (strncmp(FName, "OSAtomicCompareAndSwap", 22) == 0 ||
+ strncmp(FName, "objc_atomicCompareAndSwap", 25) == 0)
+ return EvalOSAtomicCompareAndSwap(Dst, Engine, Builder, CE, L, Pred);
+
+ // FIXME: Other atomics.
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Function calls.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::EvalCall(NodeSet& Dst, CallExpr* CE, SVal L, NodeTy* Pred) {
+ assert (Builder && "GRStmtNodeBuilder must be defined.");
+
+ // FIXME: Allow us to chain together transfer functions.
+ if (EvalOSAtomic(Dst, *this, *Builder, CE, L, Pred))
+ return;
+
+ getTF().EvalCall(Dst, *this, *Builder, CE, L, Pred);
+}
+
+void GRExprEngine::VisitCall(CallExpr* CE, NodeTy* Pred,
+ CallExpr::arg_iterator AI,
+ CallExpr::arg_iterator AE,
+ NodeSet& Dst)
+{
+ // Determine the type of function we're calling (if available).
+ const FunctionProtoType *Proto = NULL;
+ QualType FnType = CE->getCallee()->IgnoreParens()->getType();
+ if (const PointerType *FnTypePtr = FnType->getAsPointerType())
+ Proto = FnTypePtr->getPointeeType()->getAsFunctionProtoType();
+
+ VisitCallRec(CE, Pred, AI, AE, Dst, Proto, /*ParamIdx=*/0);
+}
+
+void GRExprEngine::VisitCallRec(CallExpr* CE, NodeTy* Pred,
+ CallExpr::arg_iterator AI,
+ CallExpr::arg_iterator AE,
+ NodeSet& Dst, const FunctionProtoType *Proto,
+ unsigned ParamIdx) {
+
+ // Process the arguments.
+ if (AI != AE) {
+ // If the call argument is being bound to a reference parameter,
+ // visit it as an lvalue, not an rvalue.
+ bool VisitAsLvalue = false;
+ if (Proto && ParamIdx < Proto->getNumArgs())
+ VisitAsLvalue = Proto->getArgType(ParamIdx)->isReferenceType();
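+ // e.g. (hypothetical analyzed code):  void f(int &r); ... f(x);
+ // Here 'x' is visited as an lvalue so that its location, rather than
+ // its rvalue, is bound to the argument expression.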
+
+ NodeSet DstTmp;
+ if (VisitAsLvalue)
+ VisitLValue(*AI, Pred, DstTmp);
+ else
+ Visit(*AI, Pred, DstTmp);
+ ++AI;
+
+ for (NodeSet::iterator DI=DstTmp.begin(), DE=DstTmp.end(); DI != DE; ++DI)
+ VisitCallRec(CE, *DI, AI, AE, Dst, Proto, ParamIdx + 1);
+
+ return;
+ }
+
+ // If we reach here we have processed all of the arguments. Evaluate
+ // the callee expression.
+
+ NodeSet DstTmp;
+ Expr* Callee = CE->getCallee()->IgnoreParens();
+
+ Visit(Callee, Pred, DstTmp);
+
+ // Finally, evaluate the function call.
+ for (NodeSet::iterator DI = DstTmp.begin(), DE = DstTmp.end(); DI!=DE; ++DI) {
+
+ const GRState* state = GetState(*DI);
+ SVal L = GetSVal(state, Callee);
+
+ // FIXME: Add support for symbolic function calls (calls involving
+ // function pointer values that are symbolic).
+
+ // Check for undefined control-flow or calls to NULL.
+
+ if (L.isUndef() || isa<loc::ConcreteInt>(L)) {
+ NodeTy* N = Builder->generateNode(CE, state, *DI);
+
+ if (N) {
+ N->markAsSink();
+ BadCalls.insert(N);
+ }
+
+ continue;
+ }
+
+ // Check for the "noreturn" attribute.
+
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ const FunctionDecl* FD = L.getAsFunctionDecl();
+ if (FD) {
+ if (FD->getAttr<NoReturnAttr>() || FD->getAttr<AnalyzerNoReturnAttr>())
+ Builder->BuildSinks = true;
+ else {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ const char* s = FD->getIdentifier()->getName();
+ unsigned n = strlen(s);
+
+ switch (n) {
+ default:
+ break;
+
+ case 4:
+ if (!memcmp(s, "exit", 4)) Builder->BuildSinks = true;
+ break;
+
+ case 5:
+ if (!memcmp(s, "panic", 5)) Builder->BuildSinks = true;
+ else if (!memcmp(s, "error", 5)) {
+ if (CE->getNumArgs() > 0) {
+ SVal X = GetSVal(state, *CE->arg_begin());
+ // FIXME: use Assume to inspect the possible symbolic value of
+ // X. Also check the specific signature of error().
+ nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&X);
+ if (CI && CI->getValue() != 0)
+ Builder->BuildSinks = true;
+ }
+ }
+ break;
+
+ case 6:
+ if (!memcmp(s, "Assert", 6)) {
+ Builder->BuildSinks = true;
+ break;
+ }
+
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ if (!memcmp(s, "ziperr", 6)) Builder->BuildSinks = true;
+
+ break;
+
+ case 7:
+ if (!memcmp(s, "assfail", 7)) Builder->BuildSinks = true;
+ break;
+
+ case 8:
+ if (!memcmp(s, "db_error", 8) ||
+ !memcmp(s, "__assert", 8))
+ Builder->BuildSinks = true;
+ break;
+
+ case 12:
+ if (!memcmp(s, "__assert_rtn", 12)) Builder->BuildSinks = true;
+ break;
+
+ case 13:
+ if (!memcmp(s, "__assert_fail", 13)) Builder->BuildSinks = true;
+ break;
+
+ case 14:
+ if (!memcmp(s, "dtrace_assfail", 14) ||
+ !memcmp(s, "yy_fatal_error", 14))
+ Builder->BuildSinks = true;
+ break;
+
+ case 26:
+ if (!memcmp(s, "_XCAssertionFailureHandler", 26) ||
+ !memcmp(s, "_DTAssertionFailureHandler", 26) ||
+ !memcmp(s, "_TSAssertionFailureHandler", 26))
+ Builder->BuildSinks = true;
+
+ break;
+ }
+
+ }
+ }
+
+ // Evaluate the call.
+
+ if (FD) {
+
+ if (unsigned id = FD->getBuiltinID(getContext()))
+ switch (id) {
+ case Builtin::BI__builtin_expect: {
+ // For __builtin_expect, just return the value of the subexpression.
+ assert (CE->arg_begin() != CE->arg_end());
+ SVal X = GetSVal(state, *(CE->arg_begin()));
+ MakeNode(Dst, CE, *DI, BindExpr(state, CE, X));
+ continue;
+ }
+
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: Refactor into StoreManager itself?
+ MemRegionManager& RM = getStateManager().getRegionManager();
+ const MemRegion* R =
+ RM.getAllocaRegion(CE, Builder->getCurrentBlockCount());
+
+ // Set the extent of the region in bytes. This enables us to use the
+ // SVal of the argument directly. If we save the extent in bits, we
+ // cannot represent values like symbol*8.
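+ // e.g. for (hypothetical analyzed code) 'p = __builtin_alloca(n);' the
+ // extent is simply the SVal of 'n'; in bits it would have to be 'n * 8',
+ // which we cannot represent for a symbolic 'n'.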
+ SVal Extent = GetSVal(state, *(CE->arg_begin()));
+ state = getStoreManager().setExtent(state, R, Extent);
+
+ MakeNode(Dst, CE, *DI, BindExpr(state, CE, loc::MemRegionVal(R)));
+ continue;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ // Check if any arguments passed by value are undefined.
+
+ bool badArg = false;
+
+ for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I) {
+
+ if (GetSVal(GetState(*DI), *I).isUndef()) {
+ NodeTy* N = Builder->generateNode(CE, GetState(*DI), *DI);
+
+ if (N) {
+ N->markAsSink();
+ UndefArgs[N] = *I;
+ }
+
+ badArg = true;
+ break;
+ }
+ }
+
+ if (badArg)
+ continue;
+
+ // Dispatch to the plug-in transfer function.
+
+ unsigned size = Dst.size();
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+ EvalCall(Dst, CE, L, *DI);
+
+ // Handle the case where no nodes were generated. Auto-generate a node
+ // containing the updated state if we aren't generating sinks.
+
+ if (!Builder->BuildSinks && Dst.size() == size &&
+ !Builder->HasGeneratedNode)
+ MakeNode(Dst, CE, *DI, state);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Eagerly-assumed branch conditions.
+//===----------------------------------------------------------------------===//
+
+static std::pair<const void*,const void*> EagerlyAssumeTag
+ = std::pair<const void*,const void*>(&EagerlyAssumeTag,0);
+
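+// A small sketch of what eager assumption does for (hypothetical) analyzed
+// code such as:
+//
+//   if (p != q) { ... }
+//
+// Rather than leaving 'p != q' as an opaque symbolic value until the branch
+// is processed, the state is split at the comparison itself: one successor
+// assumes the condition true (and binds it to 1), the other assumes it
+// false (and binds it to 0).
+//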
+void GRExprEngine::EvalEagerlyAssume(NodeSet &Dst, NodeSet &Src, Expr *Ex) {
+ for (NodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+ NodeTy *Pred = *I;
+
+ // Test if the previous node was at the same expression. This can happen
+ // when the expression fails to evaluate to anything meaningful and
+ // (as an optimization) we don't generate a node.
+ ProgramPoint P = Pred->getLocation();
+ if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
+ Dst.Add(Pred);
+ continue;
+ }
+
+ const GRState* state = Pred->getState();
+ SVal V = GetSVal(state, Ex);
+ if (isa<nonloc::SymExprVal>(V)) {
+ // First assume that the condition is true.
+ bool isFeasible = false;
+ const GRState *stateTrue = Assume(state, V, true, isFeasible);
+ if (isFeasible) {
+ stateTrue = BindExpr(stateTrue, Ex, MakeConstantVal(1U, Ex));
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag),
+ stateTrue, Pred));
+ }
+
+ // Next, assume that the condition is false.
+ isFeasible = false;
+ const GRState *stateFalse = Assume(state, V, false, isFeasible);
+ if (isFeasible) {
+ stateFalse = BindExpr(stateFalse, Ex, MakeConstantVal(0U, Ex));
+ Dst.Add(Builder->generateNode(PostStmtCustom(Ex, &EagerlyAssumeTag),
+ stateFalse, Pred));
+ }
+ }
+ else
+ Dst.Add(Pred);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C ivar references.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitObjCIvarRefExpr(ObjCIvarRefExpr* Ex,
+ NodeTy* Pred, NodeSet& Dst,
+ bool asLValue) {
+
+ Expr* Base = cast<Expr>(Ex->getBase());
+ NodeSet Tmp;
+ Visit(Base, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ SVal BaseVal = GetSVal(state, Base);
+ SVal location = StateMgr.GetLValue(state, Ex->getDecl(), BaseVal);
+
+ if (asLValue)
+ MakeNode(Dst, Ex, *I, BindExpr(state, Ex, location));
+ else
+ EvalLoad(Dst, Ex, *I, state, location);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C fast enumeration 'for' statements.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitObjCForCollectionStmt(ObjCForCollectionStmt* S,
+ NodeTy* Pred, NodeSet& Dst) {
+
+ // ObjCForCollectionStmts are processed in two places. This method
+ // handles the case where an ObjCForCollectionStmt* occurs as one of the
+ // statements within a basic block. This transfer function does two things:
+ //
+ // (1) binds the next container value to 'element'. This creates a new
+ // node in the ExplodedGraph.
+ //
+ // (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
+ // whether or not the container has any more elements. This value
+ // will be tested in ProcessBranch. We need to explicitly bind
+ // this value because a container can contain nil elements.
+ //
+ // FIXME: Eventually this logic should actually do dispatches to
+ // 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
+ // This will require simulating a temporary NSFastEnumerationState, either
+ // through an SVal or through the use of MemRegions. This value can
+ // be affixed to the ObjCForCollectionStmt* instead of 0/1; when the loop
+ // terminates we reclaim the temporary (it goes out of scope) and we
+ // can test if the SVal is 0 or if the MemRegion is null (depending
+ // on what approach we take).
+ //
+ // For now: simulate (1) by assigning either a symbol or nil if the
+ // container is empty. Thus this transfer function will by default
+ // result in state splitting.
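+ //
+ // For example (hypothetical analyzed code):
+ //
+ //   for (id x in container) { ... }
+ //
+ // Here 'element' is the declaration (or lvalue) for 'x'; on each pass we
+ // bind 'x' either to a conjured symbol (the container may still have
+ // elements) or to nil (the container is exhausted).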
+
+ Stmt* elem = S->getElement();
+ SVal ElementV;
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(elem)) {
+ VarDecl* ElemD = cast<VarDecl>(DS->getSingleDecl());
+ assert (ElemD->getInit() == 0);
+ ElementV = getStateManager().GetLValue(GetState(Pred), ElemD);
+ VisitObjCForCollectionStmtAux(S, Pred, Dst, ElementV);
+ return;
+ }
+
+ NodeSet Tmp;
+ VisitLValue(cast<Expr>(elem), Pred, Tmp);
+
+ for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ VisitObjCForCollectionStmtAux(S, *I, Dst, GetSVal(state, elem));
+ }
+}
+
+void GRExprEngine::VisitObjCForCollectionStmtAux(ObjCForCollectionStmt* S,
+ NodeTy* Pred, NodeSet& Dst,
+ SVal ElementV) {
+
+ // Get the current state. Use 'EvalLocation' to determine if it is a null
+ // pointer, etc.
+ Stmt* elem = S->getElement();
+
+ Pred = EvalLocation(elem, Pred, GetState(Pred), ElementV);
+ if (!Pred)
+ return;
+
+ GRStateRef state = GRStateRef(GetState(Pred), getStateManager());
+
+ // Handle the case where the container still has elements.
+ QualType IntTy = getContext().IntTy;
+ SVal TrueV = NonLoc::MakeVal(getBasicVals(), 1, IntTy);
+ GRStateRef hasElems = state.BindExpr(S, TrueV);
+
+ // Handle the case where the container has no elements.
+ SVal FalseV = NonLoc::MakeVal(getBasicVals(), 0, IntTy);
+ GRStateRef noElems = state.BindExpr(S, FalseV);
+
+ if (loc::MemRegionVal* MV = dyn_cast<loc::MemRegionVal>(&ElementV))
+ if (const TypedRegion* R = dyn_cast<TypedRegion>(MV->getRegion())) {
+ // FIXME: The proper thing to do is to really iterate over the
+ // container. We will do this with dispatch logic to the store.
+ // For now, just 'conjure' up a symbolic value.
+ QualType T = R->getValueType(getContext());
+ assert (Loc::IsLocType(T));
+ unsigned Count = Builder->getCurrentBlockCount();
+ SymbolRef Sym = SymMgr.getConjuredSymbol(elem, T, Count);
+ SVal V = Loc::MakeVal(getStoreManager().getRegionManager().getSymbolicRegion(Sym));
+ hasElems = hasElems.BindLoc(ElementV, V);
+
+ // Bind the location to 'nil' on the false branch.
+ SVal nilV = loc::ConcreteInt(getBasicVals().getValue(0, T));
+ noElems = noElems.BindLoc(ElementV, nilV);
+ }
+
+ // Create the new nodes.
+ MakeNode(Dst, S, Pred, hasElems);
+ MakeNode(Dst, S, Pred, noElems);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function: Objective-C message expressions.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitObjCMessageExpr(ObjCMessageExpr* ME, NodeTy* Pred,
+ NodeSet& Dst){
+
+ VisitObjCMessageExprArgHelper(ME, ME->arg_begin(), ME->arg_end(),
+ Pred, Dst);
+}
+
+void GRExprEngine::VisitObjCMessageExprArgHelper(ObjCMessageExpr* ME,
+ ObjCMessageExpr::arg_iterator AI,
+ ObjCMessageExpr::arg_iterator AE,
+ NodeTy* Pred, NodeSet& Dst) {
+ if (AI == AE) {
+
+ // Process the receiver.
+
+ if (Expr* Receiver = ME->getReceiver()) {
+ NodeSet Tmp;
+ Visit(Receiver, Pred, Tmp);
+
+ for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+ VisitObjCMessageExprDispatchHelper(ME, *NI, Dst);
+
+ return;
+ }
+
+ VisitObjCMessageExprDispatchHelper(ME, Pred, Dst);
+ return;
+ }
+
+ NodeSet Tmp;
+ Visit(*AI, Pred, Tmp);
+
+ ++AI;
+
+ for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+ VisitObjCMessageExprArgHelper(ME, AI, AE, *NI, Dst);
+}
+
+void GRExprEngine::VisitObjCMessageExprDispatchHelper(ObjCMessageExpr* ME,
+ NodeTy* Pred,
+ NodeSet& Dst) {
+
+ // FIXME: More logic for processing the method call.
+
+ const GRState* state = GetState(Pred);
+ bool RaisesException = false;
+
+
+ if (Expr* Receiver = ME->getReceiver()) {
+
+ SVal L = GetSVal(state, Receiver);
+
+ // Check for undefined control-flow.
+ if (L.isUndef()) {
+ NodeTy* N = Builder->generateNode(ME, state, Pred);
+
+ if (N) {
+ N->markAsSink();
+ UndefReceivers.insert(N);
+ }
+
+ return;
+ }
+
+ // "Assume" that the receiver is not NULL.
+ bool isFeasibleNotNull = false;
+ const GRState *StNotNull = Assume(state, L, true, isFeasibleNotNull);
+
+ // "Assume" that the receiver is NULL.
+ bool isFeasibleNull = false;
+ const GRState *StNull = Assume(state, L, false, isFeasibleNull);
+
+ if (isFeasibleNull) {
+ QualType RetTy = ME->getType();
+
+ // Check if the receiver was nil and the return value a struct.
+ if(RetTy->isRecordType()) {
+ if (BR.getParentMap().isConsumedExpr(ME)) {
+ // The [0 ...] expressions will return garbage. Flag either an
+ // explicit or implicit error. Because of the structure of this
+ // function we currently do not bifurcate the state graph at
+ // this point.
+ // FIXME: We should bifurcate and fill the returned struct with
+ // garbage.
+ if (NodeTy* N = Builder->generateNode(ME, StNull, Pred)) {
+ N->markAsSink();
+ if (isFeasibleNotNull)
+ NilReceiverStructRetImplicit.insert(N);
+ else
+ NilReceiverStructRetExplicit.insert(N);
+ }
+ }
+ }
+ else {
+ ASTContext& Ctx = getContext();
+ if (RetTy != Ctx.VoidTy) {
+ if (BR.getParentMap().isConsumedExpr(ME)) {
+ // sizeof(void *)
+ const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ // sizeof(return type)
+ const uint64_t returnTypeSize = Ctx.getTypeSize(ME->getType());
+
+ if(voidPtrSize < returnTypeSize) {
+ if (NodeTy* N = Builder->generateNode(ME, StNull, Pred)) {
+ N->markAsSink();
+ if(isFeasibleNotNull)
+ NilReceiverLargerThanVoidPtrRetImplicit.insert(N);
+ else
+ NilReceiverLargerThanVoidPtrRetExplicit.insert(N);
+ }
+ }
+ else if (!isFeasibleNotNull) {
+ // Handle the safe cases where the return value is 0 if the
+ // receiver is nil.
+ //
+ // FIXME: For now take the conservative approach that we only
+ // return null values if we *know* that the receiver is nil.
+ // This is because we can have surprises like:
+ //
+ // ... = [[NSScreens screens] objectAtIndex:0];
+ //
+ // What can happen is that [... screens] could return nil, but
+ // it most likely isn't nil. We should assume the semantics
+ // of this case unless we have *a lot* more knowledge.
+ //
+ SVal V = ValMgr.makeZeroVal(ME->getType());
+ MakeNode(Dst, ME, Pred, BindExpr(StNull, ME, V));
+ return;
+ }
+ }
+ }
+ }
+ // We have handled the cases where the receiver is nil. The remainder
+ // of this method should assume that the receiver is not nil.
+ if (!StNotNull)
+ return;
+
+ state = StNotNull;
+ }
+
+ // Check if the "raise" message was sent.
+ if (ME->getSelector() == RaiseSel)
+ RaisesException = true;
+ }
+ else {
+
+ IdentifierInfo* ClsName = ME->getClassName();
+ Selector S = ME->getSelector();
+
+ // Check for special instance methods.
+
+ if (!NSExceptionII) {
+ ASTContext& Ctx = getContext();
+
+ NSExceptionII = &Ctx.Idents.get("NSException");
+ }
+
+ if (ClsName == NSExceptionII) {
+
+ enum { NUM_RAISE_SELECTORS = 2 };
+
+ // Lazily create a cache of the selectors.
+
+ if (!NSExceptionInstanceRaiseSelectors) {
+
+ ASTContext& Ctx = getContext();
+
+ NSExceptionInstanceRaiseSelectors = new Selector[NUM_RAISE_SELECTORS];
+
+ llvm::SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
+ unsigned idx = 0;
+
+ // raise:format:
+ II.push_back(&Ctx.Idents.get("raise"));
+ II.push_back(&Ctx.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+ // raise:format:arguments:
+ II.push_back(&Ctx.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
+ if (S == NSExceptionInstanceRaiseSelectors[i]) {
+ RaisesException = true; break;
+ }
+ }
+ }
+
+ // Check for any arguments that are uninitialized/undefined.
+
+ for (ObjCMessageExpr::arg_iterator I = ME->arg_begin(), E = ME->arg_end();
+ I != E; ++I) {
+
+ if (GetSVal(state, *I).isUndef()) {
+
+ // Generate an error node for passing an uninitialized/undefined value
+ // as an argument to a message expression. This node is a sink.
+ NodeTy* N = Builder->generateNode(ME, state, Pred);
+
+ if (N) {
+ N->markAsSink();
+ MsgExprUndefArgs[N] = *I;
+ }
+
+ return;
+ }
+ }
+
+ // Check if we raise an exception. For now treat these as sinks. Eventually
+ // we will want to handle exceptions properly.
+
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+
+ if (RaisesException)
+ Builder->BuildSinks = true;
+
+ // Dispatch to plug-in transfer function.
+
+ unsigned size = Dst.size();
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ EvalObjCMessageExpr(Dst, ME, Pred);
+
+ // Handle the case where no nodes were generated. Auto-generate a node
+ // containing the updated state if we aren't generating sinks.
+
+ if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
+ MakeNode(Dst, ME, Pred, state);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Miscellaneous statements.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::VisitCastPointerToInteger(SVal V, const GRState* state,
+ QualType PtrTy,
+ Expr* CastE, NodeTy* Pred,
+ NodeSet& Dst) {
+ if (!V.isUnknownOrUndef()) {
+ // FIXME: Determine if the number of bits of the target type is
+ // equal or exceeds the number of bits to store the pointer value.
+ // If not, flag an error.
+ MakeNode(Dst, CastE, Pred, BindExpr(state, CastE, EvalCast(cast<Loc>(V),
+ CastE->getType())));
+ }
+ else
+ MakeNode(Dst, CastE, Pred, BindExpr(state, CastE, V));
+}
+
+
+void GRExprEngine::VisitCast(Expr* CastE, Expr* Ex, NodeTy* Pred, NodeSet& Dst){
+ NodeSet S1;
+ QualType T = CastE->getType();
+ QualType ExTy = Ex->getType();
+
+ if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
+ T = ExCast->getTypeAsWritten();
+
+ if (ExTy->isArrayType() || ExTy->isFunctionType() || T->isReferenceType())
+ VisitLValue(Ex, Pred, S1);
+ else
+ Visit(Ex, Pred, S1);
+
+ // Check for casting to "void".
+ if (T->isVoidType()) {
+ for (NodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1)
+ Dst.Add(*I1);
+
+ return;
+ }
+
+ // FIXME: The rest of this should probably just go into EvalCall, and
+ // let the transfer function object be responsible for constructing
+ // nodes.
+
+ for (NodeSet::iterator I1 = S1.begin(), E1 = S1.end(); I1 != E1; ++I1) {
+ NodeTy* N = *I1;
+ const GRState* state = GetState(N);
+ SVal V = GetSVal(state, Ex);
+ ASTContext& C = getContext();
+
+ // Unknown?
+ if (V.isUnknown()) {
+ Dst.Add(N);
+ continue;
+ }
+
+ // Undefined?
+ if (V.isUndef())
+ goto PassThrough;
+
+ // For const casts, just propagate the value.
+ if (C.getCanonicalType(T).getUnqualifiedType() ==
+ C.getCanonicalType(ExTy).getUnqualifiedType())
+ goto PassThrough;
+
+ // Check for casts from pointers to integers.
+ if (T->isIntegerType() && Loc::IsLocType(ExTy)) {
+ VisitCastPointerToInteger(V, state, ExTy, CastE, N, Dst);
+ continue;
+ }
+
+ // Check for casts from integers to pointers.
+ if (Loc::IsLocType(T) && ExTy->isIntegerType()) {
+ if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&V)) {
+ // Just unpackage the lval and return it.
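+ // e.g. (hypothetical analyzed code):  (int *)(intptr_t)p
+ // The intermediate integer still carries the original location as a
+ // nonloc::LocAsInteger, so casting back simply recovers that Loc.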
+ V = LV->getLoc();
+ MakeNode(Dst, CastE, N, BindExpr(state, CastE, V));
+ continue;
+ }
+
+ goto DispatchCast;
+ }
+
+ // Just pass through function and block pointers.
+ if (ExTy->isBlockPointerType() || ExTy->isFunctionPointerType()) {
+ assert(Loc::IsLocType(T));
+ goto PassThrough;
+ }
+
+ // Check for casts from array type to another type.
+ if (ExTy->isArrayType()) {
+ // We will always decay to a pointer.
+ V = StateMgr.ArrayToPointer(cast<Loc>(V));
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (T->isPointerType())
+ goto PassThrough;
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(T->isIntegerType());
+ QualType ElemTy = cast<ArrayType>(ExTy)->getElementType();
+ QualType PointerTy = getContext().getPointerType(ElemTy);
+ VisitCastPointerToInteger(V, state, PointerTy, CastE, N, Dst);
+ continue;
+ }
+
+ // Check for casts from a region to a specific type.
+ if (loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(&V)) {
+ // FIXME: For TypedViewRegions, we should handle the case where the
+ // underlying symbolic pointer is a function pointer or
+ // block pointer.
+
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+
+ assert(Loc::IsLocType(T));
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::IsLocType(ExTy) || ExTy->isFunctionType());
+
+ const MemRegion* R = RV->getRegion();
+ StoreManager& StoreMgr = getStoreManager();
+
+ // Delegate to store manager to get the result of casting a region
+ // to a different type.
+ const StoreManager::CastResult& Res = StoreMgr.CastRegion(state, R, T);
+
+ // Inspect the result. If the MemRegion* returned is NULL, this
+ // expression evaluates to UnknownVal.
+ R = Res.getRegion();
+ if (R) { V = loc::MemRegionVal(R); } else { V = UnknownVal(); }
+
+ // Generate the new node in the ExplodedGraph.
+ MakeNode(Dst, CastE, N, BindExpr(Res.getState(), CastE, V));
+ continue;
+ }
+ // All other cases.
+ DispatchCast: {
+ MakeNode(Dst, CastE, N, BindExpr(state, CastE,
+ EvalCast(V, CastE->getType())));
+ continue;
+ }
+
+ PassThrough: {
+ MakeNode(Dst, CastE, N, BindExpr(state, CastE, V));
+ }
+ }
+}
+
+void GRExprEngine::VisitCompoundLiteralExpr(CompoundLiteralExpr* CL,
+ NodeTy* Pred, NodeSet& Dst,
+ bool asLValue) {
+ InitListExpr* ILE = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
+ NodeSet Tmp;
+ Visit(ILE, Pred, Tmp);
+
+ for (NodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I!=EI; ++I) {
+ const GRState* state = GetState(*I);
+ SVal ILV = GetSVal(state, ILE);
+ state = StateMgr.BindCompoundLiteral(state, CL, ILV);
+
+ if (asLValue)
+ MakeNode(Dst, CL, *I, BindExpr(state, CL, StateMgr.GetLValue(state, CL)));
+ else
+ MakeNode(Dst, CL, *I, BindExpr(state, CL, ILV));
+ }
+}
+
+void GRExprEngine::VisitDeclStmt(DeclStmt* DS, NodeTy* Pred, NodeSet& Dst) {
+
+ // The CFG has one DeclStmt per Decl.
+ Decl* D = *DS->decl_begin();
+
+ if (!D || !isa<VarDecl>(D))
+ return;
+
+ const VarDecl* VD = dyn_cast<VarDecl>(D);
+ Expr* InitEx = const_cast<Expr*>(VD->getInit());
+
+ // FIXME: static variables may have an initializer, but the second
+ // time a function is called those values may not be current.
+ NodeSet Tmp;
+
+ if (InitEx)
+ Visit(InitEx, Pred, Tmp);
+
+ if (Tmp.empty())
+ Tmp.Add(Pred);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ unsigned Count = Builder->getCurrentBlockCount();
+
+ // Check if 'VD' is a VLA and if so check if it has a non-zero size.
+ QualType T = getContext().getCanonicalType(VD->getType());
+ if (VariableArrayType* VLA = dyn_cast<VariableArrayType>(T)) {
+ // FIXME: Handle multi-dimensional VLAs.
+
+ Expr* SE = VLA->getSizeExpr();
+ SVal Size = GetSVal(state, SE);
+
+ if (Size.isUndef()) {
+ if (NodeTy* N = Builder->generateNode(DS, state, Pred)) {
+ N->markAsSink();
+ ExplicitBadSizedVLA.insert(N);
+ }
+ continue;
+ }
+
+ bool isFeasibleZero = false;
+ const GRState* ZeroSt = Assume(state, Size, false, isFeasibleZero);
+
+ bool isFeasibleNotZero = false;
+ state = Assume(state, Size, true, isFeasibleNotZero);
+
+ if (isFeasibleZero) {
+ if (NodeTy* N = Builder->generateNode(DS, ZeroSt, Pred)) {
+ N->markAsSink();
+ if (isFeasibleNotZero) ImplicitBadSizedVLA.insert(N);
+ else ExplicitBadSizedVLA.insert(N);
+ }
+ }
+
+ if (!isFeasibleNotZero)
+ continue;
+ }
+
+ // Decls without InitExpr are not initialized explicitly.
+ if (InitEx) {
+ SVal InitVal = GetSVal(state, InitEx);
+ QualType T = VD->getType();
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if (InitVal.isUnknown() ||
+ !getConstraintManager().canReasonAbout(InitVal)) {
+ InitVal = ValMgr.getConjuredSymbolVal(InitEx, Count);
+ }
+
+ state = StateMgr.BindDecl(state, VD, InitVal);
+
+ // The next thing to do is check if the GRTransferFuncs object wants to
+ // update the state based on the new binding. If the GRTransferFunc
+ // object doesn't do anything, just auto-propagate the current state.
+ GRStmtNodeBuilderRef BuilderRef(Dst, *Builder, *this, *I, state, DS,true);
+ getTF().EvalBind(BuilderRef, loc::MemRegionVal(StateMgr.getRegion(VD)),
+ InitVal);
+ }
+ else {
+ state = StateMgr.BindDeclWithNoInit(state, VD);
+ MakeNode(Dst, DS, *I, state);
+ }
+ }
+}
+
+namespace {
+ // This class is used by VisitInitListExpr as an item in a worklist
+ // for processing the values contained in an InitListExpr.
+class VISIBILITY_HIDDEN InitListWLItem {
+public:
+ llvm::ImmutableList<SVal> Vals;
+ GRExprEngine::NodeTy* N;
+ InitListExpr::reverse_iterator Itr;
+
+ InitListWLItem(GRExprEngine::NodeTy* n, llvm::ImmutableList<SVal> vals,
+ InitListExpr::reverse_iterator itr)
+ : Vals(vals), N(n), Itr(itr) {}
+};
+}
+
+
+void GRExprEngine::VisitInitListExpr(InitListExpr* E, NodeTy* Pred,
+ NodeSet& Dst) {
+
+ const GRState* state = GetState(Pred);
+ QualType T = getContext().getCanonicalType(E->getType());
+ unsigned NumInitElements = E->getNumInits();
+
+ if (T->isArrayType() || T->isStructureType()) {
+
+ llvm::ImmutableList<SVal> StartVals = getBasicVals().getEmptySValList();
+
+ // Handle base case where the initializer has no elements.
+ // e.g.: static int* myArray[] = {};
+ if (NumInitElements == 0) {
+ SVal V = NonLoc::MakeCompoundVal(T, StartVals, getBasicVals());
+ MakeNode(Dst, E, Pred, BindExpr(state, E, V));
+ return;
+ }
+
+ // Create a worklist to process the initializers.
+ llvm::SmallVector<InitListWLItem, 10> WorkList;
+ WorkList.reserve(NumInitElements);
+ WorkList.push_back(InitListWLItem(Pred, StartVals, E->rbegin()));
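+ // e.g. for (hypothetical analyzed code) 'int a[3] = { 1, 2, 3 };' the
+ // initializers are visited back-to-front and each value is prepended to
+ // the immutable list, so the final compound value lists { 1, 2, 3 } in
+ // source order.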
+ InitListExpr::reverse_iterator ItrEnd = E->rend();
+
+ // Process the worklist until it is empty.
+ while (!WorkList.empty()) {
+ InitListWLItem X = WorkList.back();
+ WorkList.pop_back();
+
+ NodeSet Tmp;
+ Visit(*X.Itr, X.N, Tmp);
+
+ InitListExpr::reverse_iterator NewItr = X.Itr + 1;
+
+ for (NodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ // Get the last initializer value.
+ state = GetState(*NI);
+ SVal InitV = GetSVal(state, cast<Expr>(*X.Itr));
+
+ // Construct the new list of values by prepending the new value to
+ // the already constructed list.
+ llvm::ImmutableList<SVal> NewVals =
+ getBasicVals().consVals(InitV, X.Vals);
+
+ if (NewItr == ItrEnd) {
+ // Now we have a list holding all init values. Make CompoundValData.
+ SVal V = NonLoc::MakeCompoundVal(T, NewVals, getBasicVals());
+
+ // Make final state and node.
+ MakeNode(Dst, E, *NI, BindExpr(state, E, V));
+ }
+ else {
+ // Still some initializer values to go. Push them onto the worklist.
+ WorkList.push_back(InitListWLItem(*NI, NewVals, NewItr));
+ }
+ }
+ }
+
+ return;
+ }
+
+ if (T->isUnionType() || T->isVectorType()) {
+ // FIXME: to be implemented.
+ // Note that vector types can return true for T->isIntegerType().
+ MakeNode(Dst, E, Pred, state);
+ return;
+ }
+
+ if (Loc::IsLocType(T) || T->isIntegerType()) {
+ assert (E->getNumInits() == 1);
+ NodeSet Tmp;
+ Expr* Init = E->getInit(0);
+ Visit(Init, Pred, Tmp);
+ for (NodeSet::iterator I = Tmp.begin(), EI = Tmp.end(); I != EI; ++I) {
+ state = GetState(*I);
+ MakeNode(Dst, E, *I, BindExpr(state, E, GetSVal(state, Init)));
+ }
+ return;
+ }
+
+
+ printf("InitListExpr type = %s\n", T.getAsString().c_str());
+ assert(0 && "unprocessed InitListExpr type");
+}
+
+/// VisitSizeOfAlignOfExpr - Transfer function for sizeof and alignof.
+void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
+ NodeTy* Pred,
+ NodeSet& Dst) {
+ QualType T = Ex->getTypeOfArgument();
+ uint64_t amt;
+
+ if (Ex->isSizeOf()) {
+ if (T == getContext().VoidTy) {
+ // sizeof(void) == 1 byte.
+ amt = 1;
+ }
+ else if (!T.getTypePtr()->isConstantSizeType()) {
+ // FIXME: Add support for VLAs.
+ return;
+ }
+ else if (T->isObjCInterfaceType()) {
+ // Some code tries to take the sizeof an ObjCInterfaceType, relying on
+ // the compiler having laid out its representation. Just report Unknown
+ // for these.
+ return;
+ }
+ else {
+ // All other cases.
+ amt = getContext().getTypeSize(T) / 8;
+ }
+ }
+ else // Get alignment of the type.
+ amt = getContext().getTypeAlign(T) / 8;
+
+ MakeNode(Dst, Ex, Pred,
+ BindExpr(GetState(Pred), Ex,
+ NonLoc::MakeVal(getBasicVals(), amt, Ex->getType())));
+}
+
+
+void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, NodeTy* Pred,
+ NodeSet& Dst, bool asLValue) {
+
+ switch (U->getOpcode()) {
+
+ default:
+ break;
+
+ case UnaryOperator::Deref: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ const GRState* state = GetState(*I);
+ SVal location = GetSVal(state, Ex);
+
+ if (asLValue)
+ MakeNode(Dst, U, *I, BindExpr(state, U, location),
+ ProgramPoint::PostLValueKind);
+ else
+ EvalLoad(Dst, U, *I, state, location);
+ }
+
+ return;
+ }
+
+ case UnaryOperator::Real: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ Dst.Add(*I);
+ continue;
+ }
+
+ // For all other types, UnaryOperator::Real is an identity operation.
+ assert (U->getType() == Ex->getType());
+ const GRState* state = GetState(*I);
+ MakeNode(Dst, U, *I, BindExpr(state, U, GetSVal(state, Ex)));
+ }
+
+ return;
+ }
+
+ case UnaryOperator::Imag: {
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ Dst.Add(*I);
+ continue;
+ }
+
+ // For all other types, UnaryOperator::Imag returns 0.
+ assert (Ex->getType()->isIntegerType());
+ const GRState* state = GetState(*I);
+ SVal X = NonLoc::MakeVal(getBasicVals(), 0, Ex->getType());
+ MakeNode(Dst, U, *I, BindExpr(state, U, X));
+ }
+
+ return;
+ }
+
+ // FIXME: Just report "Unknown" for OffsetOf.
+ case UnaryOperator::OffsetOf:
+ Dst.Add(Pred);
+ return;
+
+ case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
+ case UnaryOperator::Extension: {
+
+ // Unary "+" is a no-op, similar to parentheses. We still have places
+ // where it may be a block-level expression, so we need to
+ // generate an extra node that just propagates the value of the
+ // subexpression.
+
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ MakeNode(Dst, U, *I, BindExpr(state, U, GetSVal(state, Ex)));
+ }
+
+ return;
+ }
+
+ case UnaryOperator::AddrOf: {
+
+ assert(!asLValue);
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ VisitLValue(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ SVal V = GetSVal(state, Ex);
+ state = BindExpr(state, U, V);
+ MakeNode(Dst, U, *I, state);
+ }
+
+ return;
+ }
+
+ case UnaryOperator::LNot:
+ case UnaryOperator::Minus:
+ case UnaryOperator::Not: {
+
+ assert (!asLValue);
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ NodeSet Tmp;
+ Visit(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+
+ // Get the value of the subexpression.
+ SVal V = GetSVal(state, Ex);
+
+ if (V.isUnknownOrUndef()) {
+ MakeNode(Dst, U, *I, BindExpr(state, U, V));
+ continue;
+ }
+
+// QualType DstT = getContext().getCanonicalType(U->getType());
+// QualType SrcT = getContext().getCanonicalType(Ex->getType());
+//
+// if (DstT != SrcT) // Perform promotions.
+// V = EvalCast(V, DstT);
+//
+// if (V.isUnknownOrUndef()) {
+// MakeNode(Dst, U, *I, BindExpr(St, U, V));
+// continue;
+// }
+
+ switch (U->getOpcode()) {
+ default:
+ assert(false && "Invalid Opcode.");
+ break;
+
+ case UnaryOperator::Not:
+ // FIXME: Do we need to handle promotions?
+ state = BindExpr(state, U, EvalComplement(cast<NonLoc>(V)));
+ break;
+
+ case UnaryOperator::Minus:
+ // FIXME: Do we need to handle promotions?
+ state = BindExpr(state, U, EvalMinus(U, cast<NonLoc>(V)));
+ break;
+
+ case UnaryOperator::LNot:
+
+ // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+ //
+ // Note: technically we do "E == 0", but this is the same in the
+ // transfer functions as "0 == E".
+
+ if (isa<Loc>(V)) {
+ Loc X = Loc::MakeNull(getBasicVals());
+ SVal Result = EvalBinOp(state,BinaryOperator::EQ, cast<Loc>(V), X,
+ U->getType());
+ state = BindExpr(state, U, Result);
+ }
+ else {
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+#if 0
+ SVal Result = EvalBinOp(BinaryOperator::EQ, cast<NonLoc>(V), X);
+ state = SetSVal(state, U, Result);
+#else
+ EvalBinOp(Dst, U, BinaryOperator::EQ, cast<NonLoc>(V), X, *I,
+ U->getType());
+ continue;
+#endif
+ }
+
+ break;
+ }
+
+ MakeNode(Dst, U, *I, state);
+ }
+
+ return;
+ }
+ }
+
+ // Handle ++ and -- (both pre- and post-increment).
+
+ assert (U->isIncrementDecrementOp());
+ NodeSet Tmp;
+ Expr* Ex = U->getSubExpr()->IgnoreParens();
+ VisitLValue(Ex, Pred, Tmp);
+
+ for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I!=E; ++I) {
+
+ const GRState* state = GetState(*I);
+ SVal V1 = GetSVal(state, Ex);
+
+ // Perform a load.
+ NodeSet Tmp2;
+ EvalLoad(Tmp2, Ex, *I, state, V1);
+
+ for (NodeSet::iterator I2 = Tmp2.begin(), E2 = Tmp2.end(); I2!=E2; ++I2) {
+
+ state = GetState(*I2);
+ SVal V2 = GetSVal(state, Ex);
+
+ // Propagate unknown and undefined values.
+ if (V2.isUnknownOrUndef()) {
+ MakeNode(Dst, U, *I2, BindExpr(state, U, V2));
+ continue;
+ }
+
+ // Handle all other values.
+ BinaryOperator::Opcode Op = U->isIncrementOp() ? BinaryOperator::Add
+ : BinaryOperator::Sub;
+
+ SVal Result = EvalBinOp(state, Op, V2, MakeConstantVal(1U, U),
+ U->getType());
+
+ // Conjure a new symbol if necessary to recover precision.
+ if (Result.isUnknown() || !getConstraintManager().canReasonAbout(Result)){
+ Result = ValMgr.getConjuredSymbolVal(Ex,
+ Builder->getCurrentBlockCount());
+
+ // If the value is a location, ++/-- should always preserve
+ // non-nullness. Check if the original value was non-null, and if so propagate
+ // that constraint.
+ if (Loc::IsLocType(U->getType())) {
+ SVal Constraint = EvalBinOp(state, BinaryOperator::EQ, V2,
+ ValMgr.makeZeroVal(U->getType()),
+ getContext().IntTy);
+
+ bool isFeasible = false;
+ Assume(state, Constraint, true, isFeasible);
+ if (!isFeasible) {
+ // It isn't feasible for the original value to be null.
+ // Propagate this constraint.
+ Constraint = EvalBinOp(state, BinaryOperator::EQ, Result,
+ ValMgr.makeZeroVal(U->getType()),
+ getContext().IntTy);
+
+ bool isFeasible = false;
+ state = Assume(state, Constraint, false, isFeasible);
+ assert(isFeasible && state);
+ }
+ }
+ }
+
+ state = BindExpr(state, U, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
+ EvalStore(Dst, U, *I2, state, V1, Result);
+ }
+ }
+}
+
+void GRExprEngine::VisitAsmStmt(AsmStmt* A, NodeTy* Pred, NodeSet& Dst) {
+ VisitAsmStmtHelperOutputs(A, A->begin_outputs(), A->end_outputs(), Pred, Dst);
+}
+
+void GRExprEngine::VisitAsmStmtHelperOutputs(AsmStmt* A,
+ AsmStmt::outputs_iterator I,
+ AsmStmt::outputs_iterator E,
+ NodeTy* Pred, NodeSet& Dst) {
+ if (I == E) {
+ VisitAsmStmtHelperInputs(A, A->begin_inputs(), A->end_inputs(), Pred, Dst);
+ return;
+ }
+
+ NodeSet Tmp;
+ VisitLValue(*I, Pred, Tmp);
+
+ ++I;
+
+ for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+ VisitAsmStmtHelperOutputs(A, I, E, *NI, Dst);
+}
+
+void GRExprEngine::VisitAsmStmtHelperInputs(AsmStmt* A,
+ AsmStmt::inputs_iterator I,
+ AsmStmt::inputs_iterator E,
+ NodeTy* Pred, NodeSet& Dst) {
+ if (I == E) {
+
+ // We have processed both the inputs and the outputs. All of the outputs
+ // should evaluate to Locs. Nuke all of their values.
+
+ // FIXME: Some day in the future it would be nice to allow a "plug-in"
+ // which interprets the inline asm and stores proper results in the
+ // outputs.
+
+ const GRState* state = GetState(Pred);
+
+ for (AsmStmt::outputs_iterator OI = A->begin_outputs(),
+ OE = A->end_outputs(); OI != OE; ++OI) {
+
+ SVal X = GetSVal(state, *OI);
+ assert (!isa<NonLoc>(X)); // Should be a Loc, or unknown/undef.
+
+ if (isa<Loc>(X))
+ state = BindLoc(state, cast<Loc>(X), UnknownVal());
+ }
+
+ MakeNode(Dst, A, Pred, state);
+ return;
+ }
+
+ NodeSet Tmp;
+ Visit(*I, Pred, Tmp);
+
+ ++I;
+
+ for (NodeSet::iterator NI = Tmp.begin(), NE = Tmp.end(); NI != NE; ++NI)
+ VisitAsmStmtHelperInputs(A, I, E, *NI, Dst);
+}
+
+void GRExprEngine::EvalReturn(NodeSet& Dst, ReturnStmt* S, NodeTy* Pred) {
+ assert (Builder && "GRStmtNodeBuilder must be defined.");
+
+ unsigned size = Dst.size();
+
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ SaveOr OldHasGen(Builder->HasGeneratedNode);
+
+ getTF().EvalReturn(Dst, *this, *Builder, S, Pred);
+
+ // Handle the case where no nodes were generated.
+
+ if (!Builder->BuildSinks && Dst.size() == size && !Builder->HasGeneratedNode)
+ MakeNode(Dst, S, Pred, GetState(Pred));
+}
+
+void GRExprEngine::VisitReturnStmt(ReturnStmt* S, NodeTy* Pred, NodeSet& Dst) {
+
+ Expr* R = S->getRetValue();
+
+ if (!R) {
+ EvalReturn(Dst, S, Pred);
+ return;
+ }
+
+ NodeSet Tmp;
+ Visit(R, Pred, Tmp);
+
+ for (NodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+ SVal X = GetSVal((*I)->getState(), R);
+
+ // Check if we return the address of a stack variable.
+ if (isa<loc::MemRegionVal>(X)) {
+ // Determine if the value is on the stack.
+ const MemRegion* R = cast<loc::MemRegionVal>(&X)->getRegion();
+
+ if (R && getStateManager().hasStackStorage(R)) {
+ // Create a special node representing the error.
+ if (NodeTy* N = Builder->generateNode(S, GetState(*I), *I)) {
+ N->markAsSink();
+ RetsStackAddr.insert(N);
+ }
+ continue;
+ }
+ }
+ // Check if we return an undefined value.
+ else if (X.isUndef()) {
+ if (NodeTy* N = Builder->generateNode(S, GetState(*I), *I)) {
+ N->markAsSink();
+ RetsUndef.insert(N);
+ }
+ continue;
+ }
+
+ EvalReturn(Dst, S, *I);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Binary operators.
+//===----------------------------------------------------------------------===//
+
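+// A short sketch of the (hypothetical) analyzed code this distinguishes:
+//
+//   int f(int d) { return 10 / d; }   // implicit: 'd' might be zero
+//   int g(void)  { return 10 / 0; }   // explicit: the denominator is zero
+//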
+const GRState* GRExprEngine::CheckDivideZero(Expr* Ex, const GRState* state,
+ NodeTy* Pred, SVal Denom) {
+
+ // Divide by undefined? (potentially zero)
+
+ if (Denom.isUndef()) {
+ NodeTy* DivUndef = Builder->generateNode(Ex, state, Pred);
+
+ if (DivUndef) {
+ DivUndef->markAsSink();
+ ExplicitBadDivides.insert(DivUndef);
+ }
+
+ return 0;
+ }
+
+ // Check for divide/remainder-by-zero.
+ // First, "assume" that the denominator is 0 or undefined.
+
+ bool isFeasibleZero = false;
+ const GRState* ZeroSt = Assume(state, Denom, false, isFeasibleZero);
+
+ // Second, "assume" that the denominator cannot be 0.
+
+ bool isFeasibleNotZero = false;
+ state = Assume(state, Denom, true, isFeasibleNotZero);
+
+ // Create the node for the divide-by-zero (if it occurred).
+
+ if (isFeasibleZero)
+ if (NodeTy* DivZeroNode = Builder->generateNode(Ex, ZeroSt, Pred)) {
+ DivZeroNode->markAsSink();
+
+ if (isFeasibleNotZero)
+ ImplicitBadDivides.insert(DivZeroNode);
+ else
+ ExplicitBadDivides.insert(DivZeroNode);
+
+ }
+
+ return isFeasibleNotZero ? state : 0;
+}
+
+void GRExprEngine::VisitBinaryOperator(BinaryOperator* B,
+ GRExprEngine::NodeTy* Pred,
+ GRExprEngine::NodeSet& Dst) {
+
+ NodeSet Tmp1;
+ Expr* LHS = B->getLHS()->IgnoreParens();
+ Expr* RHS = B->getRHS()->IgnoreParens();
+
+ // FIXME: Add proper support for ObjCKVCRefExpr.
+ if (isa<ObjCKVCRefExpr>(LHS)) {
+ Visit(RHS, Pred, Dst);
+ return;
+ }
+
+ if (B->isAssignmentOp())
+ VisitLValue(LHS, Pred, Tmp1);
+ else
+ Visit(LHS, Pred, Tmp1);
+
+ for (NodeSet::iterator I1=Tmp1.begin(), E1=Tmp1.end(); I1 != E1; ++I1) {
+
+ SVal LeftV = GetSVal((*I1)->getState(), LHS);
+
+ // Process the RHS.
+
+ NodeSet Tmp2;
+ Visit(RHS, *I1, Tmp2);
+
+ // With both the LHS and RHS evaluated, process the operation itself.
+
+ for (NodeSet::iterator I2=Tmp2.begin(), E2=Tmp2.end(); I2 != E2; ++I2) {
+
+ const GRState* state = GetState(*I2);
+ const GRState* OldSt = state;
+
+ SVal RightV = GetSVal(state, RHS);
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ switch (Op) {
+
+ case BinaryOperator::Assign: {
+
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+ QualType T = RHS->getType();
+
+ if ((RightV.isUnknown() ||
+ !getConstraintManager().canReasonAbout(RightV))
+ && (Loc::IsLocType(T) ||
+ (T->isScalarType() && T->isIntegerType()))) {
+ unsigned Count = Builder->getCurrentBlockCount();
+ RightV = ValMgr.getConjuredSymbolVal(B->getRHS(), Count);
+ }
+
+ // Simulate the effects of a "store": bind the value of the RHS
+ // to the L-Value represented by the LHS.
+ EvalStore(Dst, B, LHS, *I2, BindExpr(state, B, RightV), LeftV,
+ RightV);
+ continue;
+ }
+
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+
+ // Special checking for integer denominators.
+ if (RHS->getType()->isIntegerType() &&
+ RHS->getType()->isScalarType()) {
+
+ state = CheckDivideZero(B, state, *I2, RightV);
+ if (!state) continue;
+ }
+
+ // FALL-THROUGH.
+
+ default: {
+
+ if (B->isAssignmentOp())
+ break;
+
+ // Process non-assignments except commas or short-circuited
+ // logical expressions (LAnd and LOr).
+
+ SVal Result = EvalBinOp(state, Op, LeftV, RightV, B->getType());
+
+ if (Result.isUnknown()) {
+ if (OldSt != state) {
+ // Generate a new node if we have already created a new state.
+ MakeNode(Dst, B, *I2, state);
+ }
+ else
+ Dst.Add(*I2);
+
+ continue;
+ }
+
+ if (Result.isUndef() && !LeftV.isUndef() && !RightV.isUndef()) {
+
+ // The operands were *not* undefined, but the result is undefined.
+ // This is a special node that should be flagged as an error.
+
+ if (NodeTy* UndefNode = Builder->generateNode(B, state, *I2)) {
+ UndefNode->markAsSink();
+ UndefResults.insert(UndefNode);
+ }
+
+ continue;
+ }
+
+ // Otherwise, create a new node.
+
+ MakeNode(Dst, B, *I2, BindExpr(state, B, Result));
+ continue;
+ }
+ }
+
+ assert (B->isCompoundAssignmentOp());
+
+ switch (Op) {
+ default:
+ assert(0 && "Invalid opcode for compound assignment.");
+ case BinaryOperator::MulAssign: Op = BinaryOperator::Mul; break;
+ case BinaryOperator::DivAssign: Op = BinaryOperator::Div; break;
+ case BinaryOperator::RemAssign: Op = BinaryOperator::Rem; break;
+ case BinaryOperator::AddAssign: Op = BinaryOperator::Add; break;
+ case BinaryOperator::SubAssign: Op = BinaryOperator::Sub; break;
+ case BinaryOperator::ShlAssign: Op = BinaryOperator::Shl; break;
+ case BinaryOperator::ShrAssign: Op = BinaryOperator::Shr; break;
+ case BinaryOperator::AndAssign: Op = BinaryOperator::And; break;
+ case BinaryOperator::XorAssign: Op = BinaryOperator::Xor; break;
+ case BinaryOperator::OrAssign: Op = BinaryOperator::Or; break;
+ }
+
+ // Perform a load (the LHS). This performs the checks for
+ // null dereferences, and so on.
+ NodeSet Tmp3;
+ SVal location = GetSVal(state, LHS);
+ EvalLoad(Tmp3, LHS, *I2, state, location);
+
+ for (NodeSet::iterator I3=Tmp3.begin(), E3=Tmp3.end(); I3!=E3; ++I3) {
+
+ state = GetState(*I3);
+ SVal V = GetSVal(state, LHS);
+
+ // Check for divide-by-zero.
+ if ((Op == BinaryOperator::Div || Op == BinaryOperator::Rem)
+ && RHS->getType()->isIntegerType()
+ && RHS->getType()->isScalarType()) {
+
+ // CheckDivideZero returns a new state where the denominator
+ // is assumed to be non-zero.
+ state = CheckDivideZero(B, state, *I3, RightV);
+
+ if (!state)
+ continue;
+ }
+
+ // Propagate undefined values (left-side).
+ if (V.isUndef()) {
+ EvalStore(Dst, B, LHS, *I3, BindExpr(state, B, V), location, V);
+ continue;
+ }
+
+ // Propagate unknown values (left and right-side).
+ if (RightV.isUnknown() || V.isUnknown()) {
+ EvalStore(Dst, B, LHS, *I3, BindExpr(state, B, UnknownVal()),
+ location, UnknownVal());
+ continue;
+ }
+
+ // At this point:
+ //
+ // The LHS is not Undef/Unknown.
+ // The RHS is not Unknown.
+
+ // Get the computation type.
+ QualType CTy = cast<CompoundAssignOperator>(B)->getComputationResultType();
+ CTy = getContext().getCanonicalType(CTy);
+
+ QualType CLHSTy = cast<CompoundAssignOperator>(B)->getComputationLHSType();
+      CLHSTy = getContext().getCanonicalType(CLHSTy);
+
+ QualType LTy = getContext().getCanonicalType(LHS->getType());
+ QualType RTy = getContext().getCanonicalType(RHS->getType());
+
+ // Promote LHS.
+ V = EvalCast(V, CLHSTy);
+
+ // Evaluate operands and promote to result type.
+ if (RightV.isUndef()) {
+ // Propagate undefined values (right-side).
+ EvalStore(Dst, B, LHS, *I3, BindExpr(state, B, RightV), location,
+ RightV);
+ continue;
+ }
+
+ // Compute the result of the operation.
+ SVal Result = EvalCast(EvalBinOp(state, Op, V, RightV, CTy),
+ B->getType());
+
+ if (Result.isUndef()) {
+ // The operands were not undefined, but the result is undefined.
+ if (NodeTy* UndefNode = Builder->generateNode(B, state, *I3)) {
+ UndefNode->markAsSink();
+ UndefResults.insert(UndefNode);
+ }
+ continue;
+ }
+
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+
+ SVal LHSVal;
+
+ if ((Result.isUnknown() ||
+ !getConstraintManager().canReasonAbout(Result))
+ && (Loc::IsLocType(CTy)
+ || (CTy->isScalarType() && CTy->isIntegerType()))) {
+
+ unsigned Count = Builder->getCurrentBlockCount();
+
+ // The symbolic value is actually for the type of the left-hand side
+ // expression, not the computation type, as this is the value the
+ // LValue on the LHS will bind to.
+ LHSVal = ValMgr.getConjuredSymbolVal(B->getRHS(), LTy, Count);
+
+ // However, we need to convert the symbol to the computation type.
+ Result = (LTy == CTy) ? LHSVal : EvalCast(LHSVal,CTy);
+ }
+ else {
+          // The value bound to the left-hand side may have a different type
+          // than the computation type.
+ LHSVal = (LTy == CTy) ? Result : EvalCast(Result,LTy);
+ }
+
+ EvalStore(Dst, B, LHS, *I3, BindExpr(state, B, Result), location,
+ LHSVal);
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer-function Helpers.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::EvalBinOp(ExplodedNodeSet<GRState>& Dst, Expr* Ex,
+ BinaryOperator::Opcode Op,
+ NonLoc L, NonLoc R,
+ ExplodedNode<GRState>* Pred, QualType T) {
+
+ GRStateSet OStates;
+ EvalBinOp(OStates, GetState(Pred), Ex, Op, L, R, T);
+
+ for (GRStateSet::iterator I=OStates.begin(), E=OStates.end(); I!=E; ++I)
+ MakeNode(Dst, Ex, Pred, *I);
+}
+
+void GRExprEngine::EvalBinOp(GRStateSet& OStates, const GRState* state,
+ Expr* Ex, BinaryOperator::Opcode Op,
+ NonLoc L, NonLoc R, QualType T) {
+
+ GRStateSet::AutoPopulate AP(OStates, state);
+ if (R.isValid()) getTF().EvalBinOpNN(OStates, *this, state, Ex, Op, L, R, T);
+}
+
+SVal GRExprEngine::EvalBinOp(const GRState* state, BinaryOperator::Opcode Op,
+ SVal L, SVal R, QualType T) {
+
+ if (L.isUndef() || R.isUndef())
+ return UndefinedVal();
+
+ if (L.isUnknown() || R.isUnknown())
+ return UnknownVal();
+
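+  // Dispatch on the operand categories: pointer-valued operands are handled
+  // by the transfer functions' Loc logic (commuting the operands when only
+  // the RHS is a pointer); two NonLocs fall through to ordinary arithmetic.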
+ if (isa<Loc>(L)) {
+ if (isa<Loc>(R))
+ return getTF().EvalBinOp(*this, Op, cast<Loc>(L), cast<Loc>(R));
+ else
+ return getTF().EvalBinOp(*this, state, Op, cast<Loc>(L), cast<NonLoc>(R));
+ }
+
+ if (isa<Loc>(R)) {
+ // Support pointer arithmetic where the increment/decrement operand
+ // is on the left and the pointer on the right.
+
+ assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
+
+ // Commute the operands.
+ return getTF().EvalBinOp(*this, state, Op, cast<Loc>(R), cast<NonLoc>(L));
+ }
+ else
+ return getTF().DetermEvalBinOpNN(*this, Op, cast<NonLoc>(L),
+ cast<NonLoc>(R), T);
+}
+
+//===----------------------------------------------------------------------===//
+// Visualization.
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static GRExprEngine* GraphPrintCheckerState;
+static SourceManager* GraphPrintSourceManager;
+
+namespace llvm {
+template<>
+struct VISIBILITY_HIDDEN DOTGraphTraits<GRExprEngine::NodeTy*> :
+ public DefaultDOTGraphTraits {
+
+ static std::string getNodeAttributes(const GRExprEngine::NodeTy* N, void*) {
+
+ if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
+ GraphPrintCheckerState->isExplicitNullDeref(N) ||
+ GraphPrintCheckerState->isUndefDeref(N) ||
+ GraphPrintCheckerState->isUndefStore(N) ||
+ GraphPrintCheckerState->isUndefControlFlow(N) ||
+ GraphPrintCheckerState->isExplicitBadDivide(N) ||
+ GraphPrintCheckerState->isImplicitBadDivide(N) ||
+ GraphPrintCheckerState->isUndefResult(N) ||
+ GraphPrintCheckerState->isBadCall(N) ||
+ GraphPrintCheckerState->isUndefArg(N))
+ return "color=\"red\",style=\"filled\"";
+
+ if (GraphPrintCheckerState->isNoReturnCall(N))
+ return "color=\"blue\",style=\"filled\"";
+
+ return "";
+ }
+
+ static std::string getNodeLabel(const GRExprEngine::NodeTy* N, void*) {
+ std::ostringstream Out;
+
+ // Program Location.
+ ProgramPoint Loc = N->getLocation();
+
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEntranceKind:
+ Out << "Block Entrance: B"
+ << cast<BlockEntrance>(Loc).getBlock()->getBlockID();
+ break;
+
+ case ProgramPoint::BlockExitKind:
+ assert (false);
+ break;
+
+ default: {
+ if (isa<PostStmt>(Loc)) {
+ const PostStmt& L = cast<PostStmt>(Loc);
+ Stmt* S = L.getStmt();
+ SourceLocation SLoc = S->getLocStart();
+
+ Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ llvm::raw_os_ostream OutS(Out);
+ S->printPretty(OutS);
+ OutS.flush();
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getInstantiationColumnNumber(SLoc)
+ << "\\l";
+ }
+
+ if (isa<PostLoad>(Loc))
+            Out << "\\lPostLoad\\l";
+ else if (isa<PostStore>(Loc))
+ Out << "\\lPostStore\\l";
+ else if (isa<PostLValue>(Loc))
+ Out << "\\lPostLValue\\l";
+ else if (isa<PostLocationChecksSucceed>(Loc))
+ Out << "\\lPostLocationChecksSucceed\\l";
+ else if (isa<PostNullCheckFailed>(Loc))
+ Out << "\\lPostNullCheckFailed\\l";
+
+ if (GraphPrintCheckerState->isImplicitNullDeref(N))
+ Out << "\\|Implicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+ Out << "\\|Explicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isUndefDeref(N))
+            Out << "\\|Dereference of undefined value.\\l";
+ else if (GraphPrintCheckerState->isUndefStore(N))
+ Out << "\\|Store to Undefined Loc.";
+          else if (GraphPrintCheckerState->isExplicitBadDivide(N))
+            Out << "\\|Explicit divide-by-zero or undefined value.";
+          else if (GraphPrintCheckerState->isImplicitBadDivide(N))
+            Out << "\\|Implicit divide-by-zero or undefined value.";
+ else if (GraphPrintCheckerState->isUndefResult(N))
+ Out << "\\|Result of operation is undefined.";
+ else if (GraphPrintCheckerState->isNoReturnCall(N))
+ Out << "\\|Call to function marked \"noreturn\".";
+ else if (GraphPrintCheckerState->isBadCall(N))
+ Out << "\\|Call to NULL/Undefined.";
+ else if (GraphPrintCheckerState->isUndefArg(N))
+ Out << "\\|Argument in call is undefined";
+
+ break;
+ }
+
+ const BlockEdge& E = cast<BlockEdge>(Loc);
+ Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+ << E.getDst()->getBlockID() << ')';
+
+ if (Stmt* T = E.getSrc()->getTerminator()) {
+
+ SourceLocation SLoc = T->getLocStart();
+
+ Out << "\\|Terminator: ";
+
+ llvm::raw_os_ostream OutS(Out);
+ E.getSrc()->printTerminator(OutS);
+ OutS.flush();
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getInstantiationLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getInstantiationColumnNumber(SLoc);
+ }
+
+ if (isa<SwitchStmt>(T)) {
+ Stmt* Label = E.getDst()->getLabel();
+
+ if (Label) {
+ if (CaseStmt* C = dyn_cast<CaseStmt>(Label)) {
+ Out << "\\lcase ";
+ llvm::raw_os_ostream OutS(Out);
+ C->getLHS()->printPretty(OutS);
+ OutS.flush();
+
+ if (Stmt* RHS = C->getRHS()) {
+ Out << " .. ";
+ RHS->printPretty(OutS);
+ OutS.flush();
+ }
+
+ Out << ":";
+ }
+ else {
+ assert (isa<DefaultStmt>(Label));
+ Out << "\\ldefault:";
+ }
+ }
+ else
+ Out << "\\l(implicit) default:";
+ }
+ else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME
+ }
+ else {
+ Out << "\\lCondition: ";
+ if (*E.getSrc()->succ_begin() == E.getDst())
+ Out << "true";
+ else
+ Out << "false";
+ }
+
+ Out << "\\l";
+ }
+
+ if (GraphPrintCheckerState->isUndefControlFlow(N)) {
+ Out << "\\|Control-flow based on\\lUndefined value.\\l";
+ }
+ }
+ }
+
+ Out << "\\|StateID: " << (void*) N->getState() << "\\|";
+
+ GRStateRef state(N->getState(), GraphPrintCheckerState->getStateManager());
+ state.printDOT(Out);
+
+ Out << "\\l";
+ return Out.str();
+ }
+};
+} // end llvm namespace
+#endif
+
+#ifndef NDEBUG
+template <typename ITERATOR>
+GRExprEngine::NodeTy* GetGraphNode(ITERATOR I) { return *I; }
+
+template <>
+GRExprEngine::NodeTy*
+GetGraphNode<llvm::DenseMap<GRExprEngine::NodeTy*, Expr*>::iterator>
+ (llvm::DenseMap<GRExprEngine::NodeTy*, Expr*>::iterator I) {
+ return I->first;
+}
+#endif
+
+void GRExprEngine::ViewGraph(bool trim) {
+#ifndef NDEBUG
+ if (trim) {
+ std::vector<NodeTy*> Src;
+
+ // Flush any outstanding reports to make sure we cover all the nodes.
+ // This does not cause them to get displayed.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(BR);
+
+ // Iterate through the reports and get their nodes.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I) {
+ for (BugType::const_iterator I2=(*I)->begin(), E2=(*I)->end(); I2!=E2; ++I2) {
+ const BugReportEquivClass& EQ = *I2;
+ const BugReport &R = **EQ.begin();
+ NodeTy *N = const_cast<NodeTy*>(R.getEndNode());
+ if (N) Src.push_back(N);
+ }
+ }
+
+ ViewGraph(&Src[0], &Src[0]+Src.size());
+ }
+ else {
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ llvm::ViewGraph(*G.roots_begin(), "GRExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+ }
+#endif
+}
+
+void GRExprEngine::ViewGraph(NodeTy** Beg, NodeTy** End) {
+#ifndef NDEBUG
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ std::auto_ptr<GRExprEngine::GraphTy> TrimmedG(G.Trim(Beg, End).first);
+
+ if (!TrimmedG.get())
+ llvm::cerr << "warning: Trimmed ExplodedGraph is empty.\n";
+ else
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedGRExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+#endif
+}
diff --git a/lib/Analysis/GRExprEngineInternalChecks.cpp b/lib/Analysis/GRExprEngineInternalChecks.cpp
new file mode 100644
index 0000000..9aea124
--- /dev/null
+++ b/lib/Analysis/GRExprEngineInternalChecks.cpp
@@ -0,0 +1,961 @@
+//=-- GRExprEngineInternalChecks.cpp - Builtin GRExprEngine Checks---*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the BugType classes used by GRExprEngine to report
+// bugs derived from builtin checks in the path-sensitive engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+template <typename ITERATOR> inline
+ExplodedNode<GRState>* GetNode(ITERATOR I) {
+ return *I;
+}
+
+template <> inline
+ExplodedNode<GRState>* GetNode(GRExprEngine::undef_arg_iterator I) {
+ return I->first;
+}
+
+//===----------------------------------------------------------------------===//
+// Forward declarations for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+static const Stmt *GetDerefExpr(const ExplodedNode<GRState> *N);
+static const Stmt *GetReceiverExpr(const ExplodedNode<GRState> *N);
+static const Stmt *GetDenomExpr(const ExplodedNode<GRState> *N);
+static const Stmt *GetCalleeExpr(const ExplodedNode<GRState> *N);
+static const Stmt *GetRetValExpr(const ExplodedNode<GRState> *N);
+
+static void registerTrackNullOrUndefValue(BugReporterContext& BRC,
+ const Stmt *ValExpr,
+ const ExplodedNode<GRState>* N);
+
+//===----------------------------------------------------------------------===//
+// Bug Descriptions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN BuiltinBugReport : public RangedBugReport {
+public:
+ BuiltinBugReport(BugType& bt, const char* desc,
+ ExplodedNode<GRState> *n)
+ : RangedBugReport(bt, desc, n) {}
+
+ BuiltinBugReport(BugType& bt, const char *shortDesc, const char *desc,
+ ExplodedNode<GRState> *n)
+ : RangedBugReport(bt, shortDesc, desc, n) {}
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N);
+};
+
+class VISIBILITY_HIDDEN BuiltinBug : public BugType {
+ GRExprEngine &Eng;
+protected:
+ const std::string desc;
+public:
+ BuiltinBug(GRExprEngine *eng, const char* n, const char* d)
+ : BugType(n, "Logic errors"), Eng(*eng), desc(d) {}
+
+ BuiltinBug(GRExprEngine *eng, const char* n)
+ : BugType(n, "Logic errors"), Eng(*eng), desc(n) {}
+
+ virtual void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) = 0;
+
+ void FlushReports(BugReporter& BR) { FlushReportsImpl(BR, Eng); }
+
+ virtual void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {}
+
+ template <typename ITER> void Emit(BugReporter& BR, ITER I, ITER E);
+};
+
+
+template <typename ITER>
+void BuiltinBug::Emit(BugReporter& BR, ITER I, ITER E) {
+ for (; I != E; ++I) BR.EmitReport(new BuiltinBugReport(*this, desc.c_str(),
+ GetNode(I)));
+}
+
+void BuiltinBugReport::registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N) {
+ static_cast<BuiltinBug&>(getBugType()).registerInitialVisitors(BRC, N, this);
+}
+
+class VISIBILITY_HIDDEN NullDeref : public BuiltinBug {
+public:
+ NullDeref(GRExprEngine* eng)
+ : BuiltinBug(eng,"Null dereference", "Dereference of null pointer") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.null_derefs_begin(), Eng.null_derefs_end());
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetDerefExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN NilReceiverStructRet : public BuiltinBug {
+public:
+ NilReceiverStructRet(GRExprEngine* eng) :
+ BuiltinBug(eng, "'nil' receiver with struct return type") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::nil_receiver_struct_ret_iterator
+ I=Eng.nil_receiver_struct_ret_begin(),
+ E=Eng.nil_receiver_struct_ret_end(); I!=E; ++I) {
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PostStmt P = cast<PostStmt>((*I)->getLocation());
+ ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
+      os << "The receiver in the message expression is 'nil' and causes the"
+            " returned value (of type '"
+         << ME->getType().getAsString()
+         << "') to be garbage or otherwise undefined.";
+
+ BuiltinBugReport *R = new BuiltinBugReport(*this, os.str().c_str(), *I);
+ R->addRange(ME->getReceiver()->getSourceRange());
+ BR.EmitReport(R);
+ }
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN NilReceiverLargerThanVoidPtrRet : public BuiltinBug {
+public:
+ NilReceiverLargerThanVoidPtrRet(GRExprEngine* eng) :
+ BuiltinBug(eng,
+ "'nil' receiver with return type larger than sizeof(void *)") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::nil_receiver_larger_than_voidptr_ret_iterator
+ I=Eng.nil_receiver_larger_than_voidptr_ret_begin(),
+ E=Eng.nil_receiver_larger_than_voidptr_ret_end(); I!=E; ++I) {
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PostStmt P = cast<PostStmt>((*I)->getLocation());
+ ObjCMessageExpr *ME = cast<ObjCMessageExpr>(P.getStmt());
+      os << "The receiver in the message expression is 'nil' and causes the"
+            " returned value (of type '"
+         << ME->getType().getAsString()
+         << "' and of size "
+         << Eng.getContext().getTypeSize(ME->getType()) / 8
+         << " bytes) to be garbage or otherwise undefined.";
+
+ BuiltinBugReport *R = new BuiltinBugReport(*this, os.str().c_str(), *I);
+ R->addRange(ME->getReceiver()->getSourceRange());
+ BR.EmitReport(R);
+ }
+ }
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN UndefinedDeref : public BuiltinBug {
+public:
+ UndefinedDeref(GRExprEngine* eng)
+ : BuiltinBug(eng,"Dereference of undefined pointer value") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.undef_derefs_begin(), Eng.undef_derefs_end());
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetDerefExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN DivZero : public BuiltinBug {
+public:
+ DivZero(GRExprEngine* eng)
+ : BuiltinBug(eng,"Division-by-zero",
+ "Division by zero or undefined value.") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.explicit_bad_divides_begin(), Eng.explicit_bad_divides_end());
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetDenomExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN UndefResult : public BuiltinBug {
+public:
+ UndefResult(GRExprEngine* eng) : BuiltinBug(eng,"Undefined result",
+ "Result of operation is undefined.") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.undef_results_begin(), Eng.undef_results_end());
+ }
+};
+
+class VISIBILITY_HIDDEN BadCall : public BuiltinBug {
+public:
+ BadCall(GRExprEngine *eng)
+ : BuiltinBug(eng, "Invalid function call",
+ "Called function pointer is a null or undefined pointer value") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.bad_calls_begin(), Eng.bad_calls_end());
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetCalleeExpr(N), N);
+ }
+};
+
+
+class VISIBILITY_HIDDEN ArgReport : public BuiltinBugReport {
+ const Stmt *Arg;
+public:
+ ArgReport(BugType& bt, const char* desc, ExplodedNode<GRState> *n,
+ const Stmt *arg)
+ : BuiltinBugReport(bt, desc, n), Arg(arg) {}
+
+ ArgReport(BugType& bt, const char *shortDesc, const char *desc,
+ ExplodedNode<GRState> *n, const Stmt *arg)
+ : BuiltinBugReport(bt, shortDesc, desc, n), Arg(arg) {}
+
+ const Stmt *getArg() const { return Arg; }
+};
+
+class VISIBILITY_HIDDEN BadArg : public BuiltinBug {
+public:
+ BadArg(GRExprEngine* eng) : BuiltinBug(eng,"Uninitialized argument",
+ "Pass-by-value argument in function call is undefined.") {}
+
+ BadArg(GRExprEngine* eng, const char* d)
+ : BuiltinBug(eng,"Uninitialized argument", d) {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::UndefArgsTy::iterator I = Eng.undef_arg_begin(),
+ E = Eng.undef_arg_end(); I!=E; ++I) {
+ // Generate a report for this bug.
+ ArgReport *report = new ArgReport(*this, desc.c_str(), I->first,
+ I->second);
+ report->addRange(I->second->getSourceRange());
+ BR.EmitReport(report);
+ }
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
+ N);
+ }
+};
+
+class VISIBILITY_HIDDEN BadMsgExprArg : public BadArg {
+public:
+ BadMsgExprArg(GRExprEngine* eng)
+ : BadArg(eng,"Pass-by-value argument in message expression is undefined"){}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::UndefArgsTy::iterator I=Eng.msg_expr_undef_arg_begin(),
+ E = Eng.msg_expr_undef_arg_end(); I!=E; ++I) {
+ // Generate a report for this bug.
+ ArgReport *report = new ArgReport(*this, desc.c_str(), I->first,
+ I->second);
+ report->addRange(I->second->getSourceRange());
+ BR.EmitReport(report);
+ }
+ }
+};
+
+class VISIBILITY_HIDDEN BadReceiver : public BuiltinBug {
+public:
+ BadReceiver(GRExprEngine* eng)
+ : BuiltinBug(eng,"Uninitialized receiver",
+ "Receiver in message expression is an uninitialized value") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::ErrorNodes::iterator I=Eng.undef_receivers_begin(),
+ End = Eng.undef_receivers_end(); I!=End; ++I) {
+
+ // Generate a report for this bug.
+ BuiltinBugReport *report = new BuiltinBugReport(*this, desc.c_str(), *I);
+ ExplodedNode<GRState>* N = *I;
+ Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
+ Expr* E = cast<ObjCMessageExpr>(S)->getReceiver();
+ assert (E && "Receiver cannot be NULL");
+ report->addRange(E->getSourceRange());
+ BR.EmitReport(report);
+ }
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetReceiverExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN RetStack : public BuiltinBug {
+public:
+ RetStack(GRExprEngine* eng)
+ : BuiltinBug(eng, "Return of address to stack-allocated memory") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::ret_stackaddr_iterator I=Eng.ret_stackaddr_begin(),
+ End = Eng.ret_stackaddr_end(); I!=End; ++I) {
+
+ ExplodedNode<GRState>* N = *I;
+ Stmt *S = cast<PostStmt>(N->getLocation()).getStmt();
+ Expr* E = cast<ReturnStmt>(S)->getRetValue();
+ assert (E && "Return expression cannot be NULL");
+
+ // Get the value associated with E.
+ loc::MemRegionVal V =
+ cast<loc::MemRegionVal>(Eng.getStateManager().GetSVal(N->getState(),
+ E));
+
+ // Generate a report for this bug.
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ SourceRange R;
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR =
+ dyn_cast<CompoundLiteralRegion>(V.getRegion())) {
+
+ const CompoundLiteralExpr* CL = CR->getLiteralExpr();
+ os << "Address of stack memory associated with a compound literal "
+ "declared on line "
+ << BR.getSourceManager()
+ .getInstantiationLineNumber(CL->getLocStart())
+ << " returned.";
+
+ R = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(V.getRegion())) {
+ const Expr* ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ R = ARE->getSourceRange();
+
+ os << "Address of stack memory allocated by call to alloca() on line "
+ << BR.getSourceManager().getInstantiationLineNumber(L)
+ << " returned.";
+ }
+ else {
+ os << "Address of stack memory associated with local variable '"
+ << V.getRegion()->getString() << "' returned.";
+ }
+
+ RangedBugReport *report = new RangedBugReport(*this, os.str().c_str(), N);
+ report->addRange(E->getSourceRange());
+ if (R.isValid()) report->addRange(R);
+ BR.EmitReport(report);
+ }
+ }
+};
+
+class VISIBILITY_HIDDEN RetUndef : public BuiltinBug {
+public:
+ RetUndef(GRExprEngine* eng) : BuiltinBug(eng, "Uninitialized return value",
+ "Uninitialized or undefined value returned to caller.") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.ret_undef_begin(), Eng.ret_undef_end());
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, GetRetValExpr(N), N);
+ }
+};
+
+class VISIBILITY_HIDDEN UndefBranch : public BuiltinBug {
+ struct VISIBILITY_HIDDEN FindUndefExpr {
+ GRStateManager& VM;
+ const GRState* St;
+
+ FindUndefExpr(GRStateManager& V, const GRState* S) : VM(V), St(S) {}
+
+ Expr* FindExpr(Expr* Ex) {
+ if (!MatchesCriteria(Ex))
+ return 0;
+
+ for (Stmt::child_iterator I=Ex->child_begin(), E=Ex->child_end();I!=E;++I)
+ if (Expr* ExI = dyn_cast_or_null<Expr>(*I)) {
+ Expr* E2 = FindExpr(ExI);
+ if (E2) return E2;
+ }
+
+ return Ex;
+ }
+
+ bool MatchesCriteria(Expr* Ex) { return VM.GetSVal(St, Ex).isUndef(); }
+ };
+
+public:
+ UndefBranch(GRExprEngine *eng)
+ : BuiltinBug(eng,"Use of uninitialized value",
+ "Branch condition evaluates to an uninitialized value.") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::undef_branch_iterator I=Eng.undef_branches_begin(),
+ E=Eng.undef_branches_end(); I!=E; ++I) {
+
+ // What's going on here: we want to highlight the subexpression of the
+ // condition that is the most likely source of the "uninitialized
+ // branch condition." We do a recursive walk of the condition's
+ // subexpressions and roughly look for the most nested subexpression
+ // that binds to Undefined. We then highlight that expression's range.
+ BlockEdge B = cast<BlockEdge>((*I)->getLocation());
+ Expr* Ex = cast<Expr>(B.getSrc()->getTerminatorCondition());
+ assert (Ex && "Block must have a terminator.");
+
+      // Get the predecessor node and check if it is a PostStmt with the Stmt
+      // being the terminator condition. We want to inspect the state
+      // of that node instead because it will contain more information about
+      // the subexpressions.
+ assert (!(*I)->pred_empty());
+
+      // Note: any predecessor will do. They should have identical state,
+      // since the BlockEdge merely acts as an error sink because the value
+      // was already undefined.
+ ExplodedNode<GRState> *N = *(*I)->pred_begin();
+ ProgramPoint P = N->getLocation();
+ const GRState* St = (*I)->getState();
+
+ if (PostStmt* PS = dyn_cast<PostStmt>(&P))
+ if (PS->getStmt() == Ex)
+ St = N->getState();
+
+ FindUndefExpr FindIt(Eng.getStateManager(), St);
+ Ex = FindIt.FindExpr(Ex);
+
+ ArgReport *R = new ArgReport(*this, desc.c_str(), *I, Ex);
+ R->addRange(Ex->getSourceRange());
+ BR.EmitReport(R);
+ }
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
+ N);
+ }
+};
+
+class VISIBILITY_HIDDEN OutOfBoundMemoryAccess : public BuiltinBug {
+public:
+ OutOfBoundMemoryAccess(GRExprEngine* eng)
+ : BuiltinBug(eng,"Out-of-bounds memory access",
+ "Load or store into an out-of-bound memory position.") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ Emit(BR, Eng.explicit_oob_memacc_begin(), Eng.explicit_oob_memacc_end());
+ }
+};
+
+class VISIBILITY_HIDDEN BadSizeVLA : public BuiltinBug {
+public:
+ BadSizeVLA(GRExprEngine* eng) :
+ BuiltinBug(eng, "Bad variable-length array (VLA) size") {}
+
+ void FlushReportsImpl(BugReporter& BR, GRExprEngine& Eng) {
+ for (GRExprEngine::ErrorNodes::iterator
+ I = Eng.ExplicitBadSizedVLA.begin(),
+ E = Eng.ExplicitBadSizedVLA.end(); I!=E; ++I) {
+
+ // Determine whether this was a 'zero-sized' VLA or a VLA with an
+ // undefined size.
+ GRExprEngine::NodeTy* N = *I;
+ PostStmt PS = cast<PostStmt>(N->getLocation());
+ DeclStmt *DS = cast<DeclStmt>(PS.getStmt());
+ VarDecl* VD = cast<VarDecl>(*DS->decl_begin());
+ QualType T = Eng.getContext().getCanonicalType(VD->getType());
+ VariableArrayType* VT = cast<VariableArrayType>(T);
+ Expr* SizeExpr = VT->getSizeExpr();
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "The expression used to specify the number of elements in the "
+ "variable-length array (VLA) '"
+ << VD->getNameAsString() << "' evaluates to ";
+
+ bool isUndefined = Eng.getStateManager().GetSVal(N->getState(),
+ SizeExpr).isUndef();
+
+ if (isUndefined)
+ os << "an undefined or garbage value.";
+ else
+ os << "0. VLAs with no elements have undefined behavior.";
+
+ std::string shortBuf;
+ llvm::raw_string_ostream os_short(shortBuf);
+      os_short << "Variable-length array '" << VD->getNameAsString() << "' "
+               << (isUndefined ? "has a garbage value for its size"
+                               : "has zero elements (undefined behavior)");
+
+ ArgReport *report = new ArgReport(*this, os_short.str().c_str(),
+ os.str().c_str(), N, SizeExpr);
+
+ report->addRange(SizeExpr->getSourceRange());
+ BR.EmitReport(report);
+ }
+ }
+
+ void registerInitialVisitors(BugReporterContext& BRC,
+ const ExplodedNode<GRState>* N,
+ BuiltinBugReport *R) {
+ registerTrackNullOrUndefValue(BRC, static_cast<ArgReport*>(R)->getArg(),
+ N);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// __attribute__(nonnull) checking
+
+class VISIBILITY_HIDDEN CheckAttrNonNull : public GRSimpleAPICheck {
+ BugType *BT;
+ BugReporter &BR;
+
+public:
+ CheckAttrNonNull(BugReporter &br) : BT(0), BR(br) {}
+
+ virtual bool Audit(ExplodedNode<GRState>* N, GRStateManager& VMgr) {
+ CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
+ const GRState* state = N->getState();
+
+ SVal X = VMgr.GetSVal(state, CE->getCallee());
+
+ const FunctionDecl* FD = X.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
+
+ if (!Att)
+ return false;
+
+ // Iterate through the arguments of CE and check them for null.
+ unsigned idx = 0;
+ bool hasError = false;
+
+ for (CallExpr::arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
+ ++I, ++idx) {
+
+ if (!VMgr.isEqual(state, *I, 0) || !Att->isNonNull(idx))
+ continue;
+
+      // Lazily allocate the BugType object if it hasn't already been created.
+      // Ownership is transferred to the BugReporter object once the BugReport
+      // is passed to 'EmitReport'.
+ if (!BT) BT =
+ new BugType("Argument with 'nonnull' attribute passed null", "API");
+
+ RangedBugReport *R = new RangedBugReport(*BT,
+ "Null pointer passed as an argument to a "
+ "'nonnull' parameter", N);
+
+ R->addRange((*I)->getSourceRange());
+ BR.EmitReport(R);
+ hasError = true;
+ }
+
+ return hasError;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+static const Stmt *GetDerefExpr(const ExplodedNode<GRState> *N) {
+ // Pattern match for a few useful cases (do something smarter later):
+ // a[0], p->f, *p
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
+ if (U->getOpcode() == UnaryOperator::Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
+ // Retrieve the base for arrays since BasicStoreManager doesn't know how
+ // to reason about them.
+ return AE->getBase();
+ }
+
+ return NULL;
+}
+
+static const Stmt *GetReceiverExpr(const ExplodedNode<GRState> *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ return ME->getReceiver();
+ return NULL;
+}
+
+static const Stmt *GetDenomExpr(const ExplodedNode<GRState> *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return NULL;
+}
+
+static const Stmt *GetCalleeExpr(const ExplodedNode<GRState> *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S))
+ return CE->getCallee();
+ return NULL;
+}
+
+static const Stmt *GetRetValExpr(const ExplodedNode<GRState> *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ return RS->getRetValue();
+ return NULL;
+}
+
+namespace {
+class VISIBILITY_HIDDEN FindLastStoreBRVisitor : public BugReporterVisitor {
+ const MemRegion *R;
+ SVal V;
+ bool satisfied;
+ const ExplodedNode<GRState> *StoreSite;
+public:
+ FindLastStoreBRVisitor(SVal v, const MemRegion *r)
+ : R(r), V(v), satisfied(false), StoreSite(0) {}
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState> *N,
+ const ExplodedNode<GRState> *PrevN,
+ BugReporterContext& BRC) {
+
+ if (satisfied)
+ return NULL;
+
+ if (!StoreSite) {
+ GRStateManager &StateMgr = BRC.getStateManager();
+ const ExplodedNode<GRState> *Node = N, *Last = NULL;
+
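+      // Walk backwards along the path until the binding for R changes (or we
+      // reach the DeclStmt that introduced the variable); 'Last' then points
+      // at the node where the value of interest was bound -- the store site.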
+ for ( ; Node ; Last = Node, Node = Node->getFirstPred()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const PostStmt *P = Node->getLocationAs<PostStmt>())
+ if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
+ if (DS->getSingleDecl() == VR->getDecl()) {
+ Last = Node;
+ break;
+ }
+ }
+
+ if (StateMgr.GetSVal(Node->getState(), R) != V)
+ break;
+ }
+
+ if (!Node || !Last) {
+ satisfied = true;
+ return NULL;
+ }
+
+ StoreSite = Last;
+ }
+
+ if (StoreSite != N)
+ return NULL;
+
+ satisfied = true;
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
+ if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "Variable '" << VR->getDecl()->getNameAsString() << "' ";
+ }
+ else
+ return NULL;
+
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable(C)) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (C.isObjCObjectPointerType(TR->getValueType(C))) {
+ os << "initialized to nil";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "initialized to a null pointer value";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue();
+ }
+ else if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit())
+ os << "initialized to a garbage value";
+ else
+ os << "declared without an initial value";
+ }
+ }
+ }
+ }
+
+ if (os.str().empty()) {
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ ASTContext &C = BRC.getASTContext();
+ if (R->isBoundable(C)) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+ if (C.isObjCObjectPointerType(TR->getValueType(C))) {
+ os << "nil object reference stored to ";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "Null pointer value stored to ";
+ }
+ else if (V.isUndef()) {
+ os << "Uninitialized value stored to ";
+ }
+ else
+ return NULL;
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << '\'' << VR->getDecl()->getNameAsString() << '\'';
+ }
+ else
+ return NULL;
+ }
+
+ // FIXME: Refactor this into BugReporterContext.
+ Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+};
+
+
+static void registerFindLastStore(BugReporterContext& BRC, const MemRegion *R,
+ SVal V) {
+ BRC.addVisitor(new FindLastStoreBRVisitor(V, R));
+}
+
+class VISIBILITY_HIDDEN TrackConstraintBRVisitor : public BugReporterVisitor {
+ SVal Constraint;
+ const bool Assumption;
+ bool isSatisfied;
+public:
+ TrackConstraintBRVisitor(SVal constraint, bool assumption)
+ : Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
+
+ PathDiagnosticPiece* VisitNode(const ExplodedNode<GRState> *N,
+ const ExplodedNode<GRState> *PrevN,
+ BugReporterContext& BRC) {
+ if (isSatisfied)
+ return NULL;
+
+ // Check if in the previous state it was feasible for this constraint
+ // to *not* be true.
+
+ GRStateManager &StateMgr = BRC.getStateManager();
+ bool isFeasible = false;
+ if (StateMgr.Assume(PrevN->getState(), Constraint, !Assumption,
+ isFeasible)) {
+ assert(isFeasible); // Eventually we don't need 'isFeasible'.
+
+ isSatisfied = true;
+
+ // As a sanity check, make sure that the negation of the constraint
+ // was infeasible in the current state. If it is feasible, we somehow
+ // missed the transition point.
+ isFeasible = false;
+ if (StateMgr.Assume(N->getState(), Constraint, !Assumption,
+ isFeasible)) {
+ assert(isFeasible);
+ return NULL;
+ }
+
+ // We found the transition point for the constraint. We now need to
+ // pretty-print the constraint. (work-in-progress)
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<Loc>(Constraint)) {
+ os << "Assuming pointer value is ";
+ os << (Assumption ? "non-null" : "null");
+ }
+
+ if (os.str().empty())
+ return NULL;
+
+ // FIXME: Refactor this into BugReporterContext.
+ Stmt *S = 0;
+ ProgramPoint P = N->getLocation();
+
+ if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ if (!S)
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ PathDiagnosticLocation L(S, BRC.getSourceManager());
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+
+ return NULL;
+ }
+};
+} // end anonymous namespace
+
+static void registerTrackConstraint(BugReporterContext& BRC, SVal Constraint,
+ bool Assumption) {
+ BRC.addVisitor(new TrackConstraintBRVisitor(Constraint, Assumption));
+}
+
+static void registerTrackNullOrUndefValue(BugReporterContext& BRC,
+ const Stmt *S,
+ const ExplodedNode<GRState>* N) {
+
+ if (!S)
+ return;
+
+ GRStateManager &StateMgr = BRC.getStateManager();
+ const GRState *state = N->getState();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD);
+
+ // What did we load?
+ SVal V = StateMgr.GetSVal(state, S);
+
+ if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)
+ || V.isUndef()) {
+ registerFindLastStore(BRC, R, V);
+ }
+ }
+ }
+
+ SVal V = StateMgr.GetSValAsScalarOrLoc(state, S);
+
+ // Uncomment this to find cases where we aren't properly getting the
+ // base value that was dereferenced.
+ // assert(!V.isUnknownOrUndef());
+
+ // Is it a symbolic value?
+ if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
+ const SubRegion *R = cast<SubRegion>(L->getRegion());
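+    // Walk up the super-region chain looking for an underlying
+    // SymbolicRegion, if there is one.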
+ while (R && !isa<SymbolicRegion>(R)) {
+ R = dyn_cast<SubRegion>(R->getSuperRegion());
+ }
+
+ if (R) {
+ assert(isa<SymbolicRegion>(R));
+ registerTrackConstraint(BRC, loc::MemRegionVal(R), false);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void GRExprEngine::RegisterInternalChecks() {
+  // Register internal "built-in" BugTypes with the BugReporter. These BugTypes
+  // differ from what most checks will likely do, since they don't create
+  // BugReports on-the-fly but instead wait until GRExprEngine finishes
+  // analyzing a function. Generation of BugReport objects is done via a call
+  // to 'FlushReports' from BugReporter.
+ BR.Register(new NullDeref(this));
+ BR.Register(new UndefinedDeref(this));
+ BR.Register(new UndefBranch(this));
+ BR.Register(new DivZero(this));
+ BR.Register(new UndefResult(this));
+ BR.Register(new BadCall(this));
+ BR.Register(new RetStack(this));
+ BR.Register(new RetUndef(this));
+ BR.Register(new BadArg(this));
+ BR.Register(new BadMsgExprArg(this));
+ BR.Register(new BadReceiver(this));
+ BR.Register(new OutOfBoundMemoryAccess(this));
+ BR.Register(new BadSizeVLA(this));
+ BR.Register(new NilReceiverStructRet(this));
+ BR.Register(new NilReceiverLargerThanVoidPtrRet(this));
+
+ // The following checks do not need to have their associated BugTypes
+ // explicitly registered with the BugReporter. If they issue any BugReports,
+ // their associated BugType will get registered with the BugReporter
+ // automatically. Note that the check itself is owned by the GRExprEngine
+ // object.
+ AddCheck(new CheckAttrNonNull(BR), Stmt::CallExprClass);
+}
diff --git a/lib/Analysis/GRSimpleVals.cpp b/lib/Analysis/GRSimpleVals.cpp
new file mode 100644
index 0000000..e1c4848
--- /dev/null
+++ b/lib/Analysis/GRSimpleVals.cpp
@@ -0,0 +1,416 @@
+// GRSimpleVals.cpp - Transfer functions for tracking simple values -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GRSimpleVals, a sub-class of GRTransferFuncs that
+// provides transfer functions for performing simple value tracking with
+// limited support for symbolics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRSimpleVals.h"
+#include "BasicObjCFoundationChecks.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "llvm/Support/Compiler.h"
+#include <sstream>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Transfer Function creation for External clients.
+//===----------------------------------------------------------------------===//
+
+GRTransferFuncs* clang::MakeGRSimpleValsTF() { return new GRSimpleVals(); }
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Casts.
+//===----------------------------------------------------------------------===//
+
+SVal GRSimpleVals::EvalCast(GRExprEngine& Eng, NonLoc X, QualType T) {
+
+ if (!isa<nonloc::ConcreteInt>(X))
+ return UnknownVal();
+
+ bool isLocType = Loc::IsLocType(T);
+
+ // Only handle casts from integers to integers.
+ if (!isLocType && !T->isIntegerType())
+ return UnknownVal();
+
+ BasicValueFactory& BasicVals = Eng.getBasicVals();
+
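+  // Rebuild the constant with the signedness and bit width of the target
+  // type; pointer-like targets produce a Loc, everything else a NonLoc.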
+ llvm::APSInt V = cast<nonloc::ConcreteInt>(X).getValue();
+ V.setIsUnsigned(T->isUnsignedIntegerType() || Loc::IsLocType(T));
+ V.extOrTrunc(Eng.getContext().getTypeSize(T));
+
+ if (isLocType)
+ return loc::ConcreteInt(BasicVals.getValue(V));
+ else
+ return nonloc::ConcreteInt(BasicVals.getValue(V));
+}
+
+// Casts.
+
+SVal GRSimpleVals::EvalCast(GRExprEngine& Eng, Loc X, QualType T) {
+
+ // Casts from pointers -> pointers, just return the lval.
+ //
+ // Casts from pointers -> references, just return the lval. These
+  // can be introduced by the frontend for corner cases, e.g.
+ // casting from va_list* to __builtin_va_list&.
+ //
+ assert (!X.isUnknownOrUndef());
+
+ if (Loc::IsLocType(T) || T->isReferenceType())
+ return X;
+
+ // FIXME: Handle transparent unions where a value can be "transparently"
+ // lifted into a union type.
+ if (T->isUnionType())
+ return UnknownVal();
+
+ assert (T->isIntegerType());
+ BasicValueFactory& BasicVals = Eng.getBasicVals();
+ unsigned BitWidth = Eng.getContext().getTypeSize(T);
+
+ if (!isa<loc::ConcreteInt>(X))
+ return nonloc::LocAsInteger::Make(BasicVals, X, BitWidth);
+
+ llvm::APSInt V = cast<loc::ConcreteInt>(X).getValue();
+ V.setIsUnsigned(T->isUnsignedIntegerType() || Loc::IsLocType(T));
+ V.extOrTrunc(BitWidth);
+ return nonloc::ConcreteInt(BasicVals.getValue(V));
+}
+
+// Unary operators.
+
+SVal GRSimpleVals::EvalMinus(GRExprEngine& Eng, UnaryOperator* U, NonLoc X){
+
+ switch (X.getSubKind()) {
+
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(X).EvalMinus(Eng.getBasicVals(), U);
+
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal GRSimpleVals::EvalComplement(GRExprEngine& Eng, NonLoc X) {
+
+ switch (X.getSubKind()) {
+
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(X).EvalComplement(Eng.getBasicVals());
+
+ default:
+ return UnknownVal();
+ }
+}
+
+// Binary operators.
+
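+// Maps each relational/equality opcode (LT..NE) to its logical negation.
+// Indexed by (Opcode - BinaryOperator::LT) when folding '($sym op c) == 0'.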
+static unsigned char LNotOpMap[] = {
+ (unsigned char) BinaryOperator::GE, /* LT => GE */
+ (unsigned char) BinaryOperator::LE, /* GT => LE */
+ (unsigned char) BinaryOperator::GT, /* LE => GT */
+ (unsigned char) BinaryOperator::LT, /* GE => LT */
+ (unsigned char) BinaryOperator::NE, /* EQ => NE */
+ (unsigned char) BinaryOperator::EQ /* NE => EQ */
+};
+
+SVal GRSimpleVals::DetermEvalBinOpNN(GRExprEngine& Eng,
+ BinaryOperator::Opcode Op,
+ NonLoc L, NonLoc R,
+ QualType T) {
+
+ BasicValueFactory& BasicVals = Eng.getBasicVals();
+ unsigned subkind = L.getSubKind();
+
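+  // Loop so that, when only the right operand is a concrete integer, we can
+  // swap the operands (flipping any relational opcode) and retry with the
+  // constant on the left.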
+ while (1) {
+
+ switch (subkind) {
+ default:
+ return UnknownVal();
+
+ case nonloc::LocAsIntegerKind: {
+ Loc LL = cast<nonloc::LocAsInteger>(L).getLoc();
+
+ switch (R.getSubKind()) {
+ case nonloc::LocAsIntegerKind:
+ return EvalBinOp(Eng, Op, LL,
+ cast<nonloc::LocAsInteger>(R).getLoc());
+
+ case nonloc::ConcreteIntKind: {
+ // Transform the integer into a location and compare.
+ ASTContext& Ctx = Eng.getContext();
+ llvm::APSInt V = cast<nonloc::ConcreteInt>(R).getValue();
+ V.setIsUnsigned(true);
+ V.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
+ return EvalBinOp(Eng, Op, LL,
+ loc::ConcreteInt(BasicVals.getValue(V)));
+ }
+
+ default:
+ switch (Op) {
+ case BinaryOperator::EQ:
+ return NonLoc::MakeIntTruthVal(BasicVals, false);
+ case BinaryOperator::NE:
+ return NonLoc::MakeIntTruthVal(BasicVals, true);
+ default:
+ // This case also handles pointer arithmetic.
+ return UnknownVal();
+ }
+ }
+ }
+
+ case nonloc::SymExprValKind: {
+ // Logical not?
+ if (!(Op == BinaryOperator::EQ && R.isZeroConstant()))
+ return UnknownVal();
+
+ const SymExpr &SE=*cast<nonloc::SymExprVal>(L).getSymbolicExpression();
+
+ // Only handle ($sym op constant) for now.
+ if (const SymIntExpr *E = dyn_cast<SymIntExpr>(&SE)) {
+ BinaryOperator::Opcode Opc = E->getOpcode();
+
+ if (Opc < BinaryOperator::LT || Opc > BinaryOperator::NE)
+ return UnknownVal();
+
+ // For comparison operators, translate the constraint by
+ // changing the opcode.
+ int idx = (unsigned) Opc - (unsigned) BinaryOperator::LT;
+
+ assert (idx >= 0 &&
+ (unsigned) idx < sizeof(LNotOpMap)/sizeof(unsigned char));
+
+ Opc = (BinaryOperator::Opcode) LNotOpMap[idx];
+ assert(E->getType(Eng.getContext()) == T);
+ E = Eng.getSymbolManager().getSymIntExpr(E->getLHS(), Opc,
+ E->getRHS(), T);
+ return nonloc::SymExprVal(E);
+ }
+
+ return UnknownVal();
+ }
+
+ case nonloc::ConcreteIntKind:
+
+ if (isa<nonloc::ConcreteInt>(R)) {
+ const nonloc::ConcreteInt& L_CI = cast<nonloc::ConcreteInt>(L);
+ const nonloc::ConcreteInt& R_CI = cast<nonloc::ConcreteInt>(R);
+ return L_CI.EvalBinOp(BasicVals, Op, R_CI);
+ }
+ else {
+ subkind = R.getSubKind();
+ NonLoc tmp = R;
+ R = L;
+ L = tmp;
+
+ // Swap the operators.
+ switch (Op) {
+ case BinaryOperator::LT: Op = BinaryOperator::GT; break;
+ case BinaryOperator::GT: Op = BinaryOperator::LT; break;
+ case BinaryOperator::LE: Op = BinaryOperator::GE; break;
+ case BinaryOperator::GE: Op = BinaryOperator::LE; break;
+ default: break;
+ }
+
+ continue;
+ }
+
+ case nonloc::SymbolValKind:
+ if (isa<nonloc::ConcreteInt>(R)) {
+ ValueManager &ValMgr = Eng.getValueManager();
+ return ValMgr.makeNonLoc(cast<nonloc::SymbolVal>(L).getSymbol(), Op,
+ cast<nonloc::ConcreteInt>(R).getValue(), T);
+ }
+ else
+ return UnknownVal();
+ }
+ }
+}
+
+
+// Binary Operators (except assignments and comma).
+
+SVal GRSimpleVals::EvalBinOp(GRExprEngine& Eng, BinaryOperator::Opcode Op,
+ Loc L, Loc R) {
+
+ switch (Op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ return EvalEquality(Eng, L, R, Op == BinaryOperator::EQ);
+ }
+}
+
+SVal GRSimpleVals::EvalBinOp(GRExprEngine& Eng, const GRState *state,
+ BinaryOperator::Opcode Op, Loc L, NonLoc R) {
+
+ // Special case: 'R' is an integer that has the same width as a pointer and
+ // we are using the integer location in a comparison. Normally this cannot be
+  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
+ // can generate comparisons that trigger this code.
+ // FIXME: Are all locations guaranteed to have pointer width?
+ if (BinaryOperator::isEqualityOp(Op)) {
+ if (nonloc::ConcreteInt *RInt = dyn_cast<nonloc::ConcreteInt>(&R)) {
+ const llvm::APSInt *X = &RInt->getValue();
+ ASTContext &C = Eng.getContext();
+ if (C.getTypeSize(C.VoidPtrTy) == X->getBitWidth()) {
+ // Convert the signedness of the integer (if necessary).
+ if (X->isSigned())
+ X = &Eng.getBasicVals().getValue(*X, true);
+
+ return EvalBinOp(Eng, Op, L, loc::ConcreteInt(*X));
+ }
+ }
+ }
+
+ // Delegate pointer arithmetic to store manager.
+ return Eng.getStoreManager().EvalBinOp(state, Op, L, R);
+}
+
+// Equality operators for Locs.
+// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
+// implemented.
+
+SVal GRSimpleVals::EvalEquality(GRExprEngine& Eng, Loc L, Loc R, bool isEqual) {
+
+ BasicValueFactory& BasicVals = Eng.getBasicVals();
+
+ switch (L.getSubKind()) {
+
+ default:
+ assert(false && "EQ/NE not implemented for this Loc.");
+ return UnknownVal();
+
+ case loc::ConcreteIntKind:
+
+ if (isa<loc::ConcreteInt>(R)) {
+ bool b = cast<loc::ConcreteInt>(L).getValue() ==
+ cast<loc::ConcreteInt>(R).getValue();
+
+ // Are we computing '!='? Flip the result.
+ if (!isEqual)
+ b = !b;
+
+ return NonLoc::MakeIntTruthVal(BasicVals, b);
+ }
+ else if (SymbolRef Sym = R.getAsSymbol()) {
+ const SymIntExpr * SE =
+ Eng.getSymbolManager().getSymIntExpr(Sym,
+ isEqual ? BinaryOperator::EQ
+ : BinaryOperator::NE,
+ cast<loc::ConcreteInt>(L).getValue(),
+ Eng.getContext().IntTy);
+ return nonloc::SymExprVal(SE);
+ }
+
+ break;
+
+ case loc::MemRegionKind: {
+ if (SymbolRef LSym = L.getAsLocSymbol()) {
+ if (isa<loc::ConcreteInt>(R)) {
+ const SymIntExpr *SE =
+ Eng.getSymbolManager().getSymIntExpr(LSym,
+ isEqual ? BinaryOperator::EQ
+ : BinaryOperator::NE,
+ cast<loc::ConcreteInt>(R).getValue(),
+ Eng.getContext().IntTy);
+
+ return nonloc::SymExprVal(SE);
+ }
+ }
+ }
+
+ // Fall-through.
+
+ case loc::GotoLabelKind:
+ return NonLoc::MakeIntTruthVal(BasicVals, isEqual ? L == R : L != R);
+ }
+
+  return NonLoc::MakeIntTruthVal(BasicVals, !isEqual);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for function calls.
+//===----------------------------------------------------------------------===//
+
+void GRSimpleVals::EvalCall(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred) {
+
+ GRStateManager& StateMgr = Eng.getStateManager();
+ const GRState* St = Builder.GetState(Pred);
+
+ // Invalidate all arguments passed in by reference (Locs).
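+  // A called function may write through any pointer argument, so
+  // conservatively forget whatever was known about the pointed-to memory.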
+
+ for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I) {
+
+ SVal V = StateMgr.GetSVal(St, *I);
+
+ if (isa<loc::MemRegionVal>(V))
+ St = StateMgr.BindLoc(St, cast<Loc>(V), UnknownVal());
+ else if (isa<nonloc::LocAsInteger>(V))
+ St = StateMgr.BindLoc(St, cast<nonloc::LocAsInteger>(V).getLoc(),
+ UnknownVal());
+
+ }
+
+ // Make up a symbol for the return value of this function.
+ // FIXME: We eventually should handle structs and other compound types
+ // that are returned by value.
+ QualType T = CE->getType();
+ if (Loc::IsLocType(T) || (T->isIntegerType() && T->isScalarType())) {
+ unsigned Count = Builder.getCurrentBlockCount();
+ SVal X = Eng.getValueManager().getConjuredSymbolVal(CE, Count);
+ St = StateMgr.BindExpr(St, CE, X, Eng.getCFG().isBlkExpr(CE), false);
+ }
+
+ Builder.MakeNode(Dst, CE, Pred, St);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Objective-C message expressions.
+//===----------------------------------------------------------------------===//
+
+void GRSimpleVals::EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Eng,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode<GRState>* Pred) {
+
+
+ // The basic transfer function logic for message expressions does nothing.
+  // We just invalidate all arguments passed in by reference.
+
+ GRStateManager& StateMgr = Eng.getStateManager();
+ const GRState* St = Builder.GetState(Pred);
+
+ for (ObjCMessageExpr::arg_iterator I = ME->arg_begin(), E = ME->arg_end();
+ I != E; ++I) {
+
+ SVal V = StateMgr.GetSVal(St, *I);
+
+ if (isa<Loc>(V))
+ St = StateMgr.BindLoc(St, cast<Loc>(V), UnknownVal());
+ }
+
+ Builder.MakeNode(Dst, ME, Pred, St);
+}
diff --git a/lib/Analysis/GRSimpleVals.h b/lib/Analysis/GRSimpleVals.h
new file mode 100644
index 0000000..6ef49dc
--- /dev/null
+++ b/lib/Analysis/GRSimpleVals.h
@@ -0,0 +1,86 @@
+// GRSimpleVals.h - Transfer functions for tracking simple values -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GRSimpleVals, a sub-class of GRTransferFuncs that
+// provides transfer functions for performing simple value tracking with
+// limited support for symbolics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_GRSIMPLEVALS
+#define LLVM_CLANG_ANALYSIS_GRSIMPLEVALS
+
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+
+namespace clang {
+
+class PathDiagnostic;
+class ASTContext;
+
+class GRSimpleVals : public GRTransferFuncs {
+protected:
+
+ virtual SVal DetermEvalBinOpNN(GRExprEngine& Eng,
+ BinaryOperator::Opcode Op,
+ NonLoc L, NonLoc R, QualType T);
+
+public:
+ GRSimpleVals() {}
+ virtual ~GRSimpleVals() {}
+
+ // Casts.
+
+ virtual SVal EvalCast(GRExprEngine& Engine, NonLoc V, QualType CastT);
+ virtual SVal EvalCast(GRExprEngine& Engine, Loc V, QualType CastT);
+
+ // Unary Operators.
+
+ virtual SVal EvalMinus(GRExprEngine& Engine, UnaryOperator* U, NonLoc X);
+
+ virtual SVal EvalComplement(GRExprEngine& Engine, NonLoc X);
+
+ // Binary Operators.
+
+ virtual SVal EvalBinOp(GRExprEngine& Engine, BinaryOperator::Opcode Op,
+ Loc L, Loc R);
+
+ // Pointer arithmetic.
+
+ virtual SVal EvalBinOp(GRExprEngine& Engine, const GRState *state,
+ BinaryOperator::Opcode Op, Loc L, NonLoc R);
+
+ // Calls.
+
+ virtual void EvalCall(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ CallExpr* CE, SVal L,
+ ExplodedNode<GRState>* Pred);
+
+ virtual void EvalObjCMessageExpr(ExplodedNodeSet<GRState>& Dst,
+ GRExprEngine& Engine,
+ GRStmtNodeBuilder<GRState>& Builder,
+ ObjCMessageExpr* ME,
+ ExplodedNode<GRState>* Pred);
+
+
+
+ static void GeneratePathDiagnostic(PathDiagnostic& PD, ASTContext& Ctx,
+ ExplodedNode<GRState>* N);
+
+protected:
+
+ // Equality (==, !=) operators for Locs.
+ SVal EvalEquality(GRExprEngine& Engine, Loc L, Loc R, bool isEqual);
+};
+
+} // end clang namespace
+
+#endif
diff --git a/lib/Analysis/GRState.cpp b/lib/Analysis/GRState.cpp
new file mode 100644
index 0000000..e0e478c
--- /dev/null
+++ b/lib/Analysis/GRState.cpp
@@ -0,0 +1,318 @@
+//= GRState.cpp - Path-Sensitive "State" for tracking values ----*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolRef, ExprBindKey, and GRState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRStateTrait.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+// Give the vtable for ConstraintManager somewhere to live.
+ConstraintManager::~ConstraintManager() {}
+
+GRStateManager::~GRStateManager() {
+ for (std::vector<GRState::Printer*>::iterator I=Printers.begin(),
+ E=Printers.end(); I!=E; ++I)
+ delete *I;
+
+ for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+ I!=E; ++I)
+ I->second.second(I->second.first);
+}
+
+const GRState*
+GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ SymbolReaper& SymReaper) {
+
+ // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+ // The roots are any Block-level exprs and Decls that our liveness algorithm
+ // tells us are live. We then see what Decls they may reference, and keep
+ // those around. This code can more than likely be made faster, and the
+ // frequency with which this method is called should be experimented with
+ // for optimum performance.
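+ // For example, if liveness tells us only 'x' is live at 'Loc', the bindings
+ // for a dead local 'y' (and anything reachable only through 'y') get swept
+ // out of the store.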
+ llvm::SmallVector<const MemRegion*, 10> RegionRoots;
+ GRState NewState = *state;
+
+ NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper, *this,
+ state, RegionRoots);
+
+ // Clean up the store.
+ NewState.St = StoreMgr->RemoveDeadBindings(&NewState, Loc, SymReaper,
+ RegionRoots);
+
+ return ConstraintMgr->RemoveDeadBindings(getPersistentState(NewState),
+ SymReaper);
+}
+
+const GRState* GRStateManager::Unbind(const GRState* St, Loc LV) {
+ Store OldStore = St->getStore();
+ Store NewStore = StoreMgr->Remove(OldStore, LV);
+
+ if (NewStore == OldStore)
+ return St;
+
+ GRState NewSt = *St;
+ NewSt.St = NewStore;
+ return getPersistentState(NewSt);
+}
+
+const GRState* GRStateManager::getInitialState() {
+
+ GRState StateImpl(EnvMgr.getInitialEnvironment(),
+ StoreMgr->getInitialStore(),
+ GDMFactory.GetEmptyMap());
+
+ return getPersistentState(StateImpl);
+}
+
+const GRState* GRStateManager::getPersistentState(GRState& State) {
+
+ llvm::FoldingSetNodeID ID;
+ State.Profile(ID);
+ void* InsertPos;
+
+ if (GRState* I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
+ return I;
+
+ GRState* I = (GRState*) Alloc.Allocate<GRState>();
+ new (I) GRState(State);
+ StateSet.InsertNode(I, InsertPos);
+ return I;
+}
+
+const GRState* GRStateManager::MakeStateWithStore(const GRState* St,
+ Store store) {
+ GRState NewSt = *St;
+ NewSt.St = store;
+ return getPersistentState(NewSt);
+}
+
+
+//===----------------------------------------------------------------------===//
+// State pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void GRState::print(std::ostream& Out, StoreManager& StoreMgr,
+ ConstraintManager& ConstraintMgr,
+ Printer** Beg, Printer** End,
+ const char* nl, const char* sep) const {
+
+ // Print the store.
+ StoreMgr.print(getStore(), Out, nl, sep);
+
+ // Print Subexpression bindings.
+ bool isFirst = true;
+
+ for (seb_iterator I = seb_begin(), E = seb_end(); I != E; ++I) {
+
+ if (isFirst) {
+ Out << nl << nl << "Sub-Expressions:" << nl;
+ isFirst = false;
+ }
+ else { Out << nl; }
+
+ Out << " (" << (void*) I.getKey() << ") ";
+ llvm::raw_os_ostream OutS(Out);
+ I.getKey()->printPretty(OutS);
+ OutS.flush();
+ Out << " : ";
+ I.getData().print(Out);
+ }
+
+ // Print block-expression bindings.
+ isFirst = true;
+
+ for (beb_iterator I = beb_begin(), E = beb_end(); I != E; ++I) {
+
+ if (isFirst) {
+ Out << nl << nl << "Block-level Expressions:" << nl;
+ isFirst = false;
+ }
+ else { Out << nl; }
+
+ Out << " (" << (void*) I.getKey() << ") ";
+ llvm::raw_os_ostream OutS(Out);
+ I.getKey()->printPretty(OutS);
+ OutS.flush();
+ Out << " : ";
+ I.getData().print(Out);
+ }
+
+ ConstraintMgr.print(this, Out, nl, sep);
+
+ // Print checker-specific data.
+ for ( ; Beg != End ; ++Beg) (*Beg)->Print(Out, this, nl, sep);
+}
+
+void GRStateRef::printDOT(std::ostream& Out) const {
+ print(Out, "\\l", "\\|");
+}
+
+void GRStateRef::printStdErr() const {
+ print(*llvm::cerr);
+}
+
+void GRStateRef::print(std::ostream& Out, const char* nl, const char* sep)const{
+ GRState::Printer **beg = Mgr->Printers.empty() ? 0 : &Mgr->Printers[0];
+ GRState::Printer **end = !beg ? 0 : beg + Mgr->Printers.size();
+ St->print(Out, *Mgr->StoreMgr, *Mgr->ConstraintMgr, beg, end, nl, sep);
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+void* const* GRState::FindGDM(void* K) const {
+ return GDM.lookup(K);
+}
+
+void*
+GRStateManager::FindGDMContext(void* K,
+ void* (*CreateContext)(llvm::BumpPtrAllocator&),
+ void (*DeleteContext)(void*)) {
+
+ std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
+ if (!p.first) {
+ p.first = CreateContext(Alloc);
+ p.second = DeleteContext;
+ }
+
+ return p.first;
+}
+
+const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
+ GRState::GenericDataMap M1 = St->getGDM();
+ GRState::GenericDataMap M2 = GDMFactory.Add(M1, Key, Data);
+
+ if (M1 == M2)
+ return St;
+
+ GRState NewSt = *St;
+ NewSt.GDM = M2;
+ return getPersistentState(NewSt);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility.
+//===----------------------------------------------------------------------===//
+
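+// ScanReachableSymbols walks an SVal transitively: it visits any symbols the
+// value contains, the region it refers to, that region's super- and
+// sub-regions, and the values bound to those regions, invoking the given
+// SymbolVisitor on each symbol found and stopping as soon as the visitor
+// returns false.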
+namespace {
+class VISIBILITY_HIDDEN ScanReachableSymbols : public SubRegionMap::Visitor {
+ typedef llvm::DenseSet<const MemRegion*> VisitedRegionsTy;
+
+ VisitedRegionsTy visited;
+ GRStateRef state;
+ SymbolVisitor &visitor;
+ llvm::OwningPtr<SubRegionMap> SRM;
+public:
+
+ ScanReachableSymbols(GRStateManager* sm, const GRState *st, SymbolVisitor& v)
+ : state(st, *sm), visitor(v) {}
+
+ bool scan(nonloc::CompoundVal val);
+ bool scan(SVal val);
+ bool scan(const MemRegion *R);
+
+ // From SubRegionMap::Visitor.
+ bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
+ return scan(SubRegion);
+ }
+};
+}
+
+bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
+ for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
+ if (!scan(*I))
+ return false;
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(SVal val) {
+ if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
+ return scan(X->getRegion());
+
+ if (SymbolRef Sym = val.getAsSymbol())
+ return visitor.VisitSymbol(Sym);
+
+ if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val))
+ return scan(*X);
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const MemRegion *R) {
+ if (isa<MemSpaceRegion>(R) || visited.count(R))
+ return true;
+
+ visited.insert(R);
+
+ // If this is a symbolic region, visit the symbol for the region.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (!visitor.VisitSymbol(SR->getSymbol()))
+ return false;
+
+ // If this is a subregion, also visit the parent regions.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R))
+ if (!scan(SR->getSuperRegion()))
+ return false;
+
+ // Now look at the binding to this region (if any).
+ if (!scan(state.GetSValAsScalarOrLoc(R)))
+ return false;
+
+ // Now look at the subregions.
+ if (!SRM.get())
+ SRM.reset(state.getManager().getStoreManager().getSubRegionMap(state));
+
+ return SRM->iterSubRegions(R, *this);
+}
+
+bool GRStateManager::scanReachableSymbols(SVal val, const GRState* state,
+ SymbolVisitor& visitor) {
+ ScanReachableSymbols S(this, state, visitor);
+ return S.scan(val);
+}
+
+//===----------------------------------------------------------------------===//
+// Queries.
+//===----------------------------------------------------------------------===//
+
+bool GRStateManager::isEqual(const GRState* state, Expr* Ex,
+ const llvm::APSInt& Y) {
+
+ SVal V = GetSVal(state, Ex);
+
+ if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+ return X->getValue() == Y;
+
+ if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+ return X->getValue() == Y;
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return ConstraintMgr->isEqual(state, Sym, Y);
+
+ return false;
+}
+
+bool GRStateManager::isEqual(const GRState* state, Expr* Ex, uint64_t x) {
+ return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
+}
+
+//===----------------------------------------------------------------------===//
+// Persistent values for indexing into the Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+int GRState::NullDerefTag::TagInt = 0;
+
diff --git a/lib/Analysis/GRTransferFuncs.cpp b/lib/Analysis/GRTransferFuncs.cpp
new file mode 100644
index 0000000..69c09d9
--- /dev/null
+++ b/lib/Analysis/GRTransferFuncs.cpp
@@ -0,0 +1,28 @@
+//== GRTransferFuncs.cpp - Path-Sens. Transfer Functions Interface --*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines GRTransferFuncs, a base class that provides the
+// interface for transfer functions used by GRExprEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+
+using namespace clang;
+
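+// Default implementation of EvalBinOpNN: evaluate the operation with the
+// single-result DetermEvalBinOpNN hook and bind the result to the expression,
+// producing exactly one successor state. Transfer-function implementations
+// normally just provide DetermEvalBinOpNN (as GRSimpleVals does).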
+void GRTransferFuncs::EvalBinOpNN(GRStateSet& OStates,
+ GRExprEngine& Eng,
+ const GRState *St, Expr* Ex,
+ BinaryOperator::Opcode Op,
+ NonLoc L, NonLoc R, QualType T) {
+
+ OStates.Add(Eng.getStateManager().BindExpr(St, Ex,
+ DetermEvalBinOpNN(Eng, Op, L, R, T)));
+}
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
new file mode 100644
index 0000000..b0eb37b
--- /dev/null
+++ b/lib/Analysis/LiveVariables.cpp
@@ -0,0 +1,359 @@
+//=- LiveVariables.cpp - Live Variable Analysis for Source CFGs -*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Live Variables analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/CFG.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Analysis/FlowSensitive/DataflowSolver.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+
+#include <string.h>
+#include <stdio.h>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Useful constants.
+//===----------------------------------------------------------------------===//
+
+static const bool Alive = true;
+static const bool Dead = false;
+
+//===----------------------------------------------------------------------===//
+// Dataflow initialization logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN RegisterDecls
+ : public CFGRecStmtDeclVisitor<RegisterDecls> {
+
+ LiveVariables::AnalysisDataTy& AD;
+
+ typedef llvm::SmallVector<VarDecl*, 20> AlwaysLiveTy;
+ AlwaysLiveTy AlwaysLive;
+
+
+public:
+ RegisterDecls(LiveVariables::AnalysisDataTy& ad) : AD(ad) {}
+
+ ~RegisterDecls() {
+
+ AD.AlwaysLive.resetValues(AD);
+
+ for (AlwaysLiveTy::iterator I = AlwaysLive.begin(), E = AlwaysLive.end();
+ I != E; ++ I)
+ AD.AlwaysLive(*I, AD) = Alive;
+ }
+
+ void VisitImplicitParamDecl(ImplicitParamDecl* IPD) {
+ // Register the VarDecl for tracking.
+ AD.Register(IPD);
+ }
+
+ void VisitVarDecl(VarDecl* VD) {
+ // Register the VarDecl for tracking.
+ AD.Register(VD);
+
+ // Does the variable have global storage? If so, it is always live.
+ if (VD->hasGlobalStorage())
+ AlwaysLive.push_back(VD);
+ }
+
+ CFG& getCFG() { return AD.getCFG(); }
+};
+} // end anonymous namespace
+
+LiveVariables::LiveVariables(ASTContext& Ctx, CFG& cfg) {
+ // Register all referenced VarDecls.
+ getAnalysisData().setCFG(cfg);
+ getAnalysisData().setContext(Ctx);
+
+ RegisterDecls R(getAnalysisData());
+ cfg.VisitBlockStmts(R);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN TransferFuncs : public CFGRecStmtVisitor<TransferFuncs>{
+ LiveVariables::AnalysisDataTy& AD;
+ LiveVariables::ValTy LiveState;
+public:
+ TransferFuncs(LiveVariables::AnalysisDataTy& ad) : AD(ad) {}
+
+ LiveVariables::ValTy& getVal() { return LiveState; }
+ CFG& getCFG() { return AD.getCFG(); }
+
+ void VisitDeclRefExpr(DeclRefExpr* DR);
+ void VisitBinaryOperator(BinaryOperator* B);
+ void VisitAssign(BinaryOperator* B);
+ void VisitDeclStmt(DeclStmt* DS);
+ void BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
+ void VisitUnaryOperator(UnaryOperator* U);
+ void Visit(Stmt *S);
+ void VisitTerminator(CFGBlock* B);
+
+ void SetTopValue(LiveVariables::ValTy& V) {
+ V = AD.AlwaysLive;
+ }
+
+};
+
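+// Visit handles three cases: (1) S is the current block-level statement: its
+// own value (if it is a block-level expression) becomes Dead before its
+// subexpressions are processed; (2) S is an ordinary subexpression: it is
+// simply processed; (3) S is some other block-level expression referenced
+// here: that reference keeps it Alive.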
+void TransferFuncs::Visit(Stmt *S) {
+
+ if (S == getCurrentBlkStmt()) {
+
+ if (AD.Observer)
+ AD.Observer->ObserveStmt(S,AD,LiveState);
+
+ if (getCFG().isBlkExpr(S)) LiveState(S,AD) = Dead;
+ StmtVisitor<TransferFuncs,void>::Visit(S);
+ }
+ else if (!getCFG().isBlkExpr(S)) {
+
+ if (AD.Observer)
+ AD.Observer->ObserveStmt(S,AD,LiveState);
+
+ StmtVisitor<TransferFuncs,void>::Visit(S);
+
+ }
+ else
+ // For block-level expressions, mark that they are live.
+ LiveState(S,AD) = Alive;
+}
+
+void TransferFuncs::VisitTerminator(CFGBlock* B) {
+
+ const Stmt* E = B->getTerminatorCondition();
+
+ if (!E)
+ return;
+
+ assert (getCFG().isBlkExpr(E));
+ LiveState(E, AD) = Alive;
+}
+
+void TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
+ if (VarDecl* V = dyn_cast<VarDecl>(DR->getDecl()))
+ LiveState(V,AD) = Alive;
+}
+
+void TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
+ if (B->isAssignmentOp()) VisitAssign(B);
+ else VisitStmt(B);
+}
+
+void
+TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
+
+ // This is a block-level expression. Its value is 'dead' before this point.
+ LiveState(S, AD) = Dead;
+
+ // This represents a 'use' of the collection.
+ Visit(S->getCollection());
+
+ // This represents a 'kill' for the variable.
+ Stmt* Element = S->getElement();
+ DeclRefExpr* DR = 0;
+ VarDecl* VD = 0;
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(Element))
+ VD = cast<VarDecl>(DS->getSingleDecl());
+ else {
+ Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
+ if ((DR = dyn_cast<DeclRefExpr>(ElemExpr)))
+ VD = cast<VarDecl>(DR->getDecl());
+ else {
+ Visit(ElemExpr);
+ return;
+ }
+ }
+
+ if (VD) {
+ LiveState(VD, AD) = Dead;
+ if (AD.Observer && DR) { AD.Observer->ObserverKill(DR); }
+ }
+}
+
+
+void TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
+ Expr *E = U->getSubExpr();
+
+ switch (U->getOpcode()) {
+ case UnaryOperator::PostInc:
+ case UnaryOperator::PostDec:
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec:
+ // Walk through the subexpressions, blasting through ParenExprs
+ // until we either find a DeclRefExpr or some non-DeclRefExpr
+ // expression.
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParens()))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Treat the --/++ operator as a kill.
+ if (AD.Observer) { AD.Observer->ObserverKill(DR); }
+ LiveState(VD, AD) = Alive;
+ return VisitDeclRefExpr(DR);
+ }
+
+ // Fall-through.
+
+ default:
+ return Visit(E);
+ }
+}
+
+void TransferFuncs::VisitAssign(BinaryOperator* B) {
+ Expr* LHS = B->getLHS();
+
+ // Assigning to a variable?
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) {
+
+ // Update liveness information.
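+ // Writing the LHS kills the variable unless it is always live (e.g. it has
+ // global storage); OR-ing with the AlwaysLive bit preserves those bindings.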
+ unsigned bit = AD.getIdx(DR->getDecl());
+ LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
+
+ if (AD.Observer) { AD.Observer->ObserverKill(DR); }
+
+ // Handle things like +=, etc., which also generate "uses"
+ // of a variable. Do this just by visiting the subexpression.
+ if (B->getOpcode() != BinaryOperator::Assign)
+ VisitDeclRefExpr(DR);
+ }
+ else // Not assigning to a variable. Process LHS as usual.
+ Visit(LHS);
+
+ Visit(B->getRHS());
+}
+
+void TransferFuncs::VisitDeclStmt(DeclStmt* DS) {
+ // Declarations effectively "kill" a variable since they cannot
+ // possibly be live before they are declared.
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI)
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DI)) {
+ // The initializer is evaluated after the variable comes into scope.
+ // Since this is a reverse dataflow analysis, we must evaluate the
+ // transfer function for this expression first.
+ if (Expr* Init = VD->getInit())
+ Visit(Init);
+
+ if (const VariableArrayType* VT =
+ AD.getContext().getAsVariableArrayType(VD->getType())) {
+ StmtIterator I(const_cast<VariableArrayType*>(VT));
+ StmtIterator E;
+ for (; I != E; ++I) Visit(*I);
+ }
+
+ // Update liveness information by killing the VarDecl.
+ unsigned bit = AD.getIdx(VD);
+ LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
+ }
+}
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Merge operator: if something is live on any successor block, it is live
+// in the current block (a set union).
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+struct Merge {
+ typedef StmtDeclBitVector_Types::ValTy ValTy;
+
+ void operator()(ValTy& Dst, const ValTy& Src) {
+ Dst.OrDeclBits(Src);
+ Dst.OrBlkExprBits(Src);
+ }
+};
+
+typedef DataflowSolver<LiveVariables, TransferFuncs, Merge> Solver;
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// External interface to run Liveness analysis.
+//===----------------------------------------------------------------------===//
+
+void LiveVariables::runOnCFG(CFG& cfg) {
+ Solver S(*this);
+ S.runOnCFG(cfg);
+}
+
+void LiveVariables::runOnAllBlocks(const CFG& cfg,
+ LiveVariables::ObserverTy* Obs,
+ bool recordStmtValues) {
+ Solver S(*this);
+ ObserverTy* OldObserver = getAnalysisData().Observer;
+ getAnalysisData().Observer = Obs;
+ S.runOnAllBlocks(cfg, recordStmtValues);
+ getAnalysisData().Observer = OldObserver;
+}
+
+//===----------------------------------------------------------------------===//
+// liveness queries
+//
+
+bool LiveVariables::isLive(const CFGBlock* B, const VarDecl* D) const {
+ DeclBitVector_Types::Idx i = getAnalysisData().getIdx(D);
+ return i.isValid() ? getBlockData(B).getBit(i) : false;
+}
+
+bool LiveVariables::isLive(const ValTy& Live, const VarDecl* D) const {
+ DeclBitVector_Types::Idx i = getAnalysisData().getIdx(D);
+ return i.isValid() ? Live.getBit(i) : false;
+}
+
+bool LiveVariables::isLive(const Stmt* Loc, const Stmt* StmtVal) const {
+ return getStmtData(Loc)(StmtVal,getAnalysisData());
+}
+
+bool LiveVariables::isLive(const Stmt* Loc, const VarDecl* D) const {
+ return getStmtData(Loc)(D,getAnalysisData());
+}
+
+//===----------------------------------------------------------------------===//
+// printing liveness state for debugging
+//
+
+void LiveVariables::dumpLiveness(const ValTy& V, SourceManager& SM) const {
+ const AnalysisDataTy& AD = getAnalysisData();
+
+ for (AnalysisDataTy::decl_iterator I = AD.begin_decl(),
+ E = AD.end_decl(); I!=E; ++I)
+ if (V.getDeclBit(I->second)) {
+ fprintf(stderr, " %s <", I->first->getIdentifier()->getName());
+ I->first->getLocation().dump(SM);
+ fprintf(stderr, ">\n");
+ }
+}
+
+void LiveVariables::dumpBlockLiveness(SourceManager& M) const {
+ for (BlockDataMapTy::iterator I = getBlockDataMap().begin(),
+ E = getBlockDataMap().end(); I!=E; ++I) {
+ fprintf(stderr, "\n[ B%d (live variables at block exit) ]\n",
+ I->first->getBlockID());
+
+ dumpLiveness(I->second,M);
+ }
+
+ fprintf(stderr,"\n");
+}
diff --git a/lib/Analysis/Makefile b/lib/Analysis/Makefile
new file mode 100644
index 0000000..c597254
--- /dev/null
+++ b/lib/Analysis/Makefile
@@ -0,0 +1,22 @@
+##===- clang/lib/Analysis/Makefile -------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangAnalysis
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Analysis/MemRegion.cpp b/lib/Analysis/MemRegion.cpp
new file mode 100644
index 0000000..9f066f4
--- /dev/null
+++ b/lib/Analysis/MemRegion.cpp
@@ -0,0 +1,494 @@
+//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemRegion and its subclasses. MemRegion defines a
+// partially-typed abstraction of memory useful for path-sensitive dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/raw_ostream.h"
+#include "clang/Analysis/PathSensitive/MemRegion.h"
+
+using namespace clang;
+
+
+MemRegion::~MemRegion() {}
+
+bool SubRegion::isSubRegionOf(const MemRegion* R) const {
+ const MemRegion* r = getSuperRegion();
+ while (r != 0) {
+ if (r == R)
+ return true;
+ if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ r = sr->getSuperRegion();
+ else
+ break;
+ }
+ return false;
+}
+
+void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)getKind());
+}
+
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) StringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const Expr* Ex, unsigned cnt) {
+ ID.AddInteger((unsigned) AllocaRegionKind);
+ ID.AddPointer(Ex);
+ ID.AddInteger(cnt);
+}
+
+void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Ex, Cnt);
+}
+
+void TypedViewRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, QualType T,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) TypedViewRegionKind);
+ ID.Add(T);
+ ID.AddPointer(superRegion);
+}
+
+void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+}
+
+void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const CompoundLiteralExpr* CL,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) CompoundLiteralRegionKind);
+ ID.AddPointer(CL);
+ ID.AddPointer(superRegion);
+}
+
+void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D,
+ const MemRegion* superRegion, Kind k) {
+ ID.AddInteger((unsigned) k);
+ ID.AddPointer(D);
+ ID.AddPointer(superRegion);
+}
+
+void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+}
+
+void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym) {
+ ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
+ ID.Add(sym);
+}
+
+void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ SymbolicRegion::ProfileRegion(ID, sym);
+}
+
+void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ QualType ElementType, SVal Idx,
+ const MemRegion* superRegion) {
+ ID.AddInteger(MemRegion::ElementRegionKind);
+ ID.Add(ElementType);
+ ID.AddPointer(superRegion);
+ Idx.Profile(ID);
+}
+
+void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
+}
+
+void CodeTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const void* data,
+ QualType t) {
+ ID.AddInteger(MemRegion::CodeTextRegionKind);
+ ID.AddPointer(data);
+ ID.Add(t);
+}
+
+void CodeTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ CodeTextRegion::ProfileRegion(ID, Data, LocationType);
+}
+
+//===----------------------------------------------------------------------===//
+// Region pretty-printing.
+//===----------------------------------------------------------------------===//
+
+std::string MemRegion::getString() const {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ print(os);
+ return os.str();
+}
+
+void MemRegion::print(llvm::raw_ostream& os) const {
+ os << "<Unknown Region>";
+}
+
+void AllocaRegion::print(llvm::raw_ostream& os) const {
+ os << "alloca{" << (void*) Ex << ',' << Cnt << '}';
+}
+
+void CodeTextRegion::print(llvm::raw_ostream& os) const {
+ os << "code{";
+ if (isDeclared())
+ os << getDecl()->getDeclName().getAsString();
+ else
+ os << '$' << getSymbol();
+
+ os << '}';
+}
+
+void CompoundLiteralRegion::print(llvm::raw_ostream& os) const {
+ // FIXME: More elaborate pretty-printing.
+ os << "{ " << (void*) CL << " }";
+}
+
+void ElementRegion::print(llvm::raw_ostream& os) const {
+ superRegion->print(os);
+ os << '['; Index.print(os); os << ']';
+}
+
+void FieldRegion::print(llvm::raw_ostream& os) const {
+ superRegion->print(os);
+ os << "->" << getDecl()->getNameAsString();
+}
+
+void StringRegion::print(llvm::raw_ostream& os) const {
+ Str->printPretty(os);
+}
+
+void SymbolicRegion::print(llvm::raw_ostream& os) const {
+ os << "SymRegion-" << sym;
+}
+
+void TypedViewRegion::print(llvm::raw_ostream& os) const {
+ os << "typed_view{" << LValueType.getAsString() << ',';
+ getSuperRegion()->print(os);
+ os << '}';
+}
+
+void VarRegion::print(llvm::raw_ostream& os) const {
+ os << cast<VarDecl>(D)->getNameAsString();
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager methods.
+//===----------------------------------------------------------------------===//
+
+MemSpaceRegion* MemRegionManager::LazyAllocate(MemSpaceRegion*& region) {
+
+ if (!region) {
+ region = (MemSpaceRegion*) A.Allocate<MemSpaceRegion>();
+ new (region) MemSpaceRegion();
+ }
+
+ return region;
+}
+
+MemSpaceRegion* MemRegionManager::getStackRegion() {
+ return LazyAllocate(stack);
+}
+
+MemSpaceRegion* MemRegionManager::getGlobalsRegion() {
+ return LazyAllocate(globals);
+}
+
+MemSpaceRegion* MemRegionManager::getHeapRegion() {
+ return LazyAllocate(heap);
+}
+
+MemSpaceRegion* MemRegionManager::getUnknownRegion() {
+ return LazyAllocate(unknown);
+}
+
+MemSpaceRegion* MemRegionManager::getCodeRegion() {
+ return LazyAllocate(code);
+}
+
+bool MemRegionManager::onStack(const MemRegion* R) {
+ while (const SubRegion* SR = dyn_cast<SubRegion>(R))
+ R = SR->getSuperRegion();
+
+ return (R != 0) && (R == stack);
+}
+
+bool MemRegionManager::onHeap(const MemRegion* R) {
+ while (const SubRegion* SR = dyn_cast<SubRegion>(R))
+ R = SR->getSuperRegion();
+
+ return (R != 0) && (R == heap);
+}
+
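+// The getFooRegion methods below all follow the same lookup-or-create pattern:
+// profile the prospective region into a FoldingSetNodeID, search the Regions
+// folding set, and on a miss allocate the region from the manager's allocator
+// and insert it, so that structurally identical regions are uniqued.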
+StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) {
+ llvm::FoldingSetNodeID ID;
+ MemSpaceRegion* GlobalsR = getGlobalsRegion();
+
+ StringRegion::ProfileRegion(ID, Str, GlobalsR);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ StringRegion* R = cast_or_null<StringRegion>(data);
+
+ if (!R) {
+ R = (StringRegion*) A.Allocate<StringRegion>();
+ new (R) StringRegion(Str, GlobalsR);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+VarRegion* MemRegionManager::getVarRegion(const VarDecl* d) {
+
+ const MemRegion* superRegion = d->hasLocalStorage() ? getStackRegion()
+ : getGlobalsRegion();
+
+ llvm::FoldingSetNodeID ID;
+ DeclRegion::ProfileRegion(ID, d, superRegion, MemRegion::VarRegionKind);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ VarRegion* R = cast_or_null<VarRegion>(data);
+
+ if (!R) {
+ R = (VarRegion*) A.Allocate<VarRegion>();
+ new (R) VarRegion(d, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+CompoundLiteralRegion*
+MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL) {
+ // Is this compound literal allocated on the stack or is it part of the
+ // global constant pool?
+ const MemRegion* superRegion = CL->isFileScope() ?
+ getGlobalsRegion() : getStackRegion();
+
+ // Profile the compound literal.
+ llvm::FoldingSetNodeID ID;
+ CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ CompoundLiteralRegion* R = cast_or_null<CompoundLiteralRegion>(data);
+
+ if (!R) {
+ R = (CompoundLiteralRegion*) A.Allocate<CompoundLiteralRegion>();
+ new (R) CompoundLiteralRegion(CL, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+ElementRegion*
+MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
+ const MemRegion* superRegion){
+
+ llvm::FoldingSetNodeID ID;
+ ElementRegion::ProfileRegion(ID, elementType, Idx, superRegion);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ElementRegion* R = cast_or_null<ElementRegion>(data);
+
+ if (!R) {
+ R = (ElementRegion*) A.Allocate<ElementRegion>();
+ new (R) ElementRegion(elementType, Idx, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+CodeTextRegion* MemRegionManager::getCodeTextRegion(const FunctionDecl* fd,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ CodeTextRegion::ProfileRegion(ID, fd, t);
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ CodeTextRegion* R = cast_or_null<CodeTextRegion>(data);
+
+ if (!R) {
+ R = (CodeTextRegion*) A.Allocate<CodeTextRegion>();
+ new (R) CodeTextRegion(fd, t, getCodeRegion());
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+CodeTextRegion* MemRegionManager::getCodeTextRegion(SymbolRef sym, QualType t) {
+ llvm::FoldingSetNodeID ID;
+ CodeTextRegion::ProfileRegion(ID, sym, t);
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ CodeTextRegion* R = cast_or_null<CodeTextRegion>(data);
+
+ if (!R) {
+ R = (CodeTextRegion*) A.Allocate<CodeTextRegion>();
+ new (R) CodeTextRegion(sym, t, getCodeRegion());
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+SymbolicRegion* MemRegionManager::getSymbolicRegion(SymbolRef sym) {
+ llvm::FoldingSetNodeID ID;
+ SymbolicRegion::ProfileRegion(ID, sym);
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ SymbolicRegion* R = cast_or_null<SymbolicRegion>(data);
+
+ if (!R) {
+ R = (SymbolicRegion*) A.Allocate<SymbolicRegion>();
+ // SymbolicRegion's storage class is usually unknown.
+ new (R) SymbolicRegion(sym, getUnknownRegion());
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+FieldRegion* MemRegionManager::getFieldRegion(const FieldDecl* d,
+ const MemRegion* superRegion) {
+ llvm::FoldingSetNodeID ID;
+ DeclRegion::ProfileRegion(ID, d, superRegion, MemRegion::FieldRegionKind);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ FieldRegion* R = cast_or_null<FieldRegion>(data);
+
+ if (!R) {
+ R = (FieldRegion*) A.Allocate<FieldRegion>();
+ new (R) FieldRegion(d, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+ObjCIvarRegion*
+MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl* d,
+ const MemRegion* superRegion) {
+ llvm::FoldingSetNodeID ID;
+ DeclRegion::ProfileRegion(ID, d, superRegion, MemRegion::ObjCIvarRegionKind);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ObjCIvarRegion* R = cast_or_null<ObjCIvarRegion>(data);
+
+ if (!R) {
+ R = (ObjCIvarRegion*) A.Allocate<ObjCIvarRegion>();
+ new (R) ObjCIvarRegion(d, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+ObjCObjectRegion*
+MemRegionManager::getObjCObjectRegion(const ObjCInterfaceDecl* d,
+ const MemRegion* superRegion) {
+ llvm::FoldingSetNodeID ID;
+ DeclRegion::ProfileRegion(ID, d, superRegion,
+ MemRegion::ObjCObjectRegionKind);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ObjCObjectRegion* R = cast_or_null<ObjCObjectRegion>(data);
+
+ if (!R) {
+ R = (ObjCObjectRegion*) A.Allocate<ObjCObjectRegion>();
+ new (R) ObjCObjectRegion(d, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+TypedViewRegion*
+MemRegionManager::getTypedViewRegion(QualType t, const MemRegion* superRegion) {
+ llvm::FoldingSetNodeID ID;
+ TypedViewRegion::ProfileRegion(ID, t, superRegion);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ TypedViewRegion* R = cast_or_null<TypedViewRegion>(data);
+
+ if (!R) {
+ R = (TypedViewRegion*) A.Allocate<TypedViewRegion>();
+ new (R) TypedViewRegion(t, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+AllocaRegion* MemRegionManager::getAllocaRegion(const Expr* E, unsigned cnt) {
+ llvm::FoldingSetNodeID ID;
+ AllocaRegion::ProfileRegion(ID, E, cnt);
+
+ void* InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ AllocaRegion* R = cast_or_null<AllocaRegion>(data);
+
+ if (!R) {
+ R = (AllocaRegion*) A.Allocate<AllocaRegion>();
+ new (R) AllocaRegion(E, cnt, getStackRegion());
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+bool MemRegionManager::hasStackStorage(const MemRegion* R) {
+
+ // Only subregions can have stack storage.
+ const SubRegion* SR = dyn_cast<SubRegion>(R);
+
+ if (!SR)
+ return false;
+
+ MemSpaceRegion* S = getStackRegion();
+
+ while (SR) {
+ R = SR->getSuperRegion();
+ if (R == S)
+ return true;
+
+ SR = dyn_cast<SubRegion>(R);
+ }
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
+const MemRegion *TypedViewRegion::removeViews() const {
+ const SubRegion *SR = this;
+ const MemRegion *R = SR;
+ while (SR && isa<TypedViewRegion>(SR)) {
+ R = SR->getSuperRegion();
+ SR = dyn_cast<SubRegion>(R);
+ }
+ return R;
+}
diff --git a/lib/Analysis/PathDiagnostic.cpp b/lib/Analysis/PathDiagnostic.cpp
new file mode 100644
index 0000000..ec96329
--- /dev/null
+++ b/lib/Analysis/PathDiagnostic.cpp
@@ -0,0 +1,242 @@
+//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Casting.h"
+#include <sstream>
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::isa;
+
+bool PathDiagnosticMacroPiece::containsEvent() const {
+ for (const_iterator I = begin(), E = end(); I!=E; ++I) {
+ if (isa<PathDiagnosticEventPiece>(*I))
+ return true;
+
+ if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (MP->containsEvent())
+ return true;
+ }
+
+ return false;
+}
+
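+// For example, GetNumCharsToLastNonPeriod("Dead store.") returns 10: the
+// length of the string with its trailing period(s) stripped.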
+static size_t GetNumCharsToLastNonPeriod(const char *s) {
+ const char *start = s;
+ const char *lastNonPeriod = 0;
+
+ for ( ; *s != '\0' ; ++s)
+ if (*s != '.') lastNonPeriod = s;
+
+ if (!lastNonPeriod)
+ return 0;
+
+ return (lastNonPeriod - start) + 1;
+}
+
+static inline size_t GetNumCharsToLastNonPeriod(const std::string &s) {
+ return s.empty () ? 0 : GetNumCharsToLastNonPeriod(&s[0]);
+}
+
+PathDiagnosticPiece::PathDiagnosticPiece(const std::string& s,
+ Kind k, DisplayHint hint)
+ : str(s, 0, GetNumCharsToLastNonPeriod(s)), kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(const char* s, Kind k,
+ DisplayHint hint)
+ : str(s, GetNumCharsToLastNonPeriod(s)), kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
+ : kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::~PathDiagnosticPiece() {}
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {
+ for (iterator I = begin(), E = end(); I != E; ++I) delete *I;
+}
+
+PathDiagnostic::PathDiagnostic() : Size(0) {}
+
+PathDiagnostic::~PathDiagnostic() {
+ for (iterator I = begin(), E = end(); I != E; ++I) delete &*I;
+}
+
+void PathDiagnostic::resetPath(bool deletePieces) {
+ Size = 0;
+
+ if (deletePieces)
+ for (iterator I=begin(), E=end(); I!=E; ++I)
+ delete &*I;
+
+ path.clear();
+}
+
+
+PathDiagnostic::PathDiagnostic(const char* bugtype, const char* desc,
+ const char* category)
+ : Size(0),
+ BugType(bugtype, GetNumCharsToLastNonPeriod(bugtype)),
+ Desc(desc, GetNumCharsToLastNonPeriod(desc)),
+ Category(category, GetNumCharsToLastNonPeriod(category)) {}
+
+PathDiagnostic::PathDiagnostic(const std::string& bugtype,
+ const std::string& desc,
+ const std::string& category)
+ : Size(0),
+ BugType(bugtype, 0, GetNumCharsToLastNonPeriod(bugtype)),
+ Desc(desc, 0, GetNumCharsToLastNonPeriod(desc)),
+ Category(category, 0, GetNumCharsToLastNonPeriod(category)) {}
+
+void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
+ const DiagnosticInfo &Info) {
+
+ // Create a PathDiagnostic with a single piece.
+
+ PathDiagnostic* D = new PathDiagnostic();
+
+ const char *LevelStr;
+ switch (DiagLevel) {
+ default:
+ case Diagnostic::Ignored: assert(0 && "Invalid diagnostic type");
+ case Diagnostic::Note: LevelStr = "note: "; break;
+ case Diagnostic::Warning: LevelStr = "warning: "; break;
+ case Diagnostic::Error: LevelStr = "error: "; break;
+ case Diagnostic::Fatal: LevelStr = "fatal error: "; break;
+ }
+
+ llvm::SmallString<100> StrC;
+ StrC += LevelStr;
+ Info.FormatDiagnostic(StrC);
+
+ PathDiagnosticPiece *P =
+ new PathDiagnosticEventPiece(Info.getLocation(),
+ std::string(StrC.begin(), StrC.end()));
+
+ for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
+ P->addRange(Info.getRange(i));
+ for (unsigned i = 0, e = Info.getNumCodeModificationHints(); i != e; ++i)
+ P->addCodeModificationHint(Info.getCodeModificationHint(i));
+ D->push_front(P);
+
+ HandlePathDiagnostic(D);
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticLocation methods.
+//===----------------------------------------------------------------------===//
+
+FullSourceLoc PathDiagnosticLocation::asLocation() const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ case RangeK:
+ break;
+ case StmtK:
+ return FullSourceLoc(S->getLocStart(), const_cast<SourceManager&>(*SM));
+ case DeclK:
+ return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
+ }
+
+ return FullSourceLoc(R.getBegin(), const_cast<SourceManager&>(*SM));
+}
+
+PathDiagnosticRange PathDiagnosticLocation::asRange() const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ return PathDiagnosticRange(R, true);
+ case RangeK:
+ break;
+ case StmtK: {
+ const Stmt *S = asStmt();
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (DS->isSingleDecl()) {
+ // Should always be the case, but we'll be defensive.
+ return SourceRange(DS->getLocStart(),
+ DS->getSingleDecl()->getLocation());
+ }
+ break;
+ }
+ // FIXME: Provide better range information for different
+ // terminators.
+ case Stmt::IfStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::ObjCForCollectionStmtClass: {
+ SourceLocation L = S->getLocStart();
+ return SourceRange(L, L);
+ }
+ }
+
+ return S->getSourceRange();
+ }
+ case DeclK:
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSourceRange();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // FIXME: We would like to always get the function body, even
+ // when it needs to be de-serialized, but getting the
+ // ASTContext here requires significant changes.
+ if (Stmt *Body = FD->getBodyIfAvailable()) {
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
+ return CS->getSourceRange();
+ else
+ return cast<CXXTryStmt>(Body)->getSourceRange();
+ }
+ }
+ else {
+ SourceLocation L = D->getLocation();
+ return PathDiagnosticRange(SourceRange(L, L), true);
+ }
+ }
+
+ return R;
+}
+
+void PathDiagnosticLocation::flatten() {
+ if (K == StmtK) {
+ R = asRange();
+ K = RangeK;
+ S = 0;
+ D = 0;
+ }
+ else if (K == DeclK) {
+ SourceLocation L = D->getLocation();
+ R = SourceRange(L, L);
+ K = SingleLocK;
+ S = 0;
+ D = 0;
+ }
+}
+
+
diff --git a/lib/Analysis/RangeConstraintManager.cpp b/lib/Analysis/RangeConstraintManager.cpp
new file mode 100644
index 0000000..f6ac2b9
--- /dev/null
+++ b/lib/Analysis/RangeConstraintManager.cpp
@@ -0,0 +1,363 @@
+//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RangeConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of GRState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/GRStateTrait.h"
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "clang/Frontend/ManagerRegistry.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace { class VISIBILITY_HIDDEN ConstraintRange {}; }
+static int ConstraintRangeIndex = 0;
+
+/// A Range represents the closed range [from, to]. The caller must
+/// guarantee that from <= to. Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
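+/// For example, Range(3, 7) represents the integers {3, 4, 5, 6, 7}, and
+/// Range(5, 5) is the singleton {5} (the only case where getConcreteValue()
+/// returns a value).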
+namespace {
+class VISIBILITY_HIDDEN Range : public std::pair<const llvm::APSInt*,
+ const llvm::APSInt*> {
+public:
+ Range(const llvm::APSInt &from, const llvm::APSInt &to)
+ : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+ assert(from <= to);
+ }
+ bool Includes(const llvm::APSInt &v) const {
+ return *first <= v && v <= *second;
+ }
+ const llvm::APSInt &From() const {
+ return *first;
+ }
+ const llvm::APSInt &To() const {
+ return *second;
+ }
+ const llvm::APSInt *getConcreteValue() const {
+ return &From() == &To() ? &From() : NULL;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(&From());
+ ID.AddPointer(&To());
+ }
+};
+
+
+class VISIBILITY_HIDDEN RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+ // When comparing whether one Range is less than another, we compare the
+ // actual APSInt values instead of their pointers. This keeps the ordering
+ // consistent (it does not depend on pointer values) and can potentially be
+ // used to speed up some of the operations in RangeSet.
+ static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ *lhs.second < *rhs.second);
+ }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then the value
+/// of a symbol is overly constrained and there are no possible values for
+/// that symbol.
+class VISIBILITY_HIDDEN RangeSet {
+ typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+ PrimRangeSet ranges; // no need to make const, since it is an
+ // ImmutableSet - this allows default operator=
+ // to work.
+public:
+ typedef PrimRangeSet::Factory Factory;
+ typedef PrimRangeSet::iterator iterator;
+
+ RangeSet(PrimRangeSet RS) : ranges(RS) {}
+ RangeSet(Factory& F) : ranges(F.GetEmptySet()) {}
+
+ iterator begin() const { return ranges.begin(); }
+ iterator end() const { return ranges.end(); }
+
+ bool isEmpty() const { return ranges.isEmpty(); }
+
+ /// Construct a new RangeSet representing '{ [from, to] }'.
+ RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+ : ranges(F.Add(F.GetEmptySet(), Range(from, to))) {}
+
+ /// Profile - Generates a hash profile of this RangeSet for use
+ /// by FoldingSet.
+ void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+ /// getConcreteValue - If a symbol is constrained to equal a specific integer
+ /// constant then this method returns that value. Otherwise, it returns
+ /// NULL.
+ const llvm::APSInt* getConcreteValue() const {
+ return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
+ }
+
+ /// AddEQ - Create a new RangeSet with the additional constraint that the
+ /// value be equal to V.
+ RangeSet AddEQ(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ // Search for a range that includes 'V'. If so, return a new RangeSet
+ // representing { [V, V] }.
+ for (PrimRangeSet::iterator i = begin(), e = end(); i!=e; ++i)
+ if (i->Includes(V))
+ return RangeSet(F, V, V);
+
+ return RangeSet(F);
+ }
+
+ /// AddNE - Create a new RangeSet with the additional constraint that the
+ /// value not be equal to V.
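+ /// For example, removing 5 from { [0, 10] } yields { [0, 4], [6, 10] }.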
+ RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = ranges;
+
+ // FIXME: We can perhaps enhance ImmutableSet to do this search for us
+ // in log(N) time using the sorted property of the internal AVL tree.
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (i->Includes(V)) {
+ // Remove the old range.
+ newRanges = F.Remove(newRanges, *i);
+ // Split the old range into possibly one or two ranges.
+ if (V != i->From())
+ newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
+ if (V != i->To())
+ newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
+ // All of the ranges are non-overlapping, so we can stop.
+ break;
+ }
+ }
+
+ return newRanges;
+ }
+
+ /// AddLT - Create a new RangeSet with the additional constraint that the
+ /// value be less than V.
+ RangeSet AddLT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (iterator i = begin(), e = end() ; i != e ; ++i) {
+ if (i->Includes(V) && i->From() < V)
+ newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
+ else if (i->To() < V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddLE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ // Strictly we should test whether the range includes V + 1, but no harm
+ // is done by this formulation.
+ if (i->Includes(V))
+ newRanges = F.Add(newRanges, Range(i->From(), V));
+ else if (i->To() <= V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddGT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
+ if (i->Includes(V) && i->To() > V)
+ newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
+ else if (i->From() > V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ RangeSet AddGE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+ PrimRangeSet newRanges = F.GetEmptySet();
+
+ for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
+ // Strictly we should test whether the range includes V - 1, but no harm
+ // is done by this formulation.
+ if (i->Includes(V))
+ newRanges = F.Add(newRanges, Range(V, i->To()));
+ else if (i->From() >= V)
+ newRanges = F.Add(newRanges, *i);
+ }
+
+ return newRanges;
+ }
+
+ void Print(std::ostream &os) const {
+ bool isFirst = true;
+ os << "{ ";
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (isFirst)
+ isFirst = false;
+ else
+ os << ", ";
+
+ os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+ << ']';
+ }
+ os << " }";
+ }
+
+ bool operator==(const RangeSet &other) const {
+ return ranges == other.ranges;
+ }
+};
+} // end anonymous namespace
+
+typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy;
+
+namespace clang {
+template<>
+struct GRStateTrait<ConstraintRange>
+ : public GRStatePartialTrait<ConstraintRangeTy> {
+ static inline void* GDMIndex() { return &ConstraintRangeIndex; }
+};
+}
+
+namespace {
+class VISIBILITY_HIDDEN RangeConstraintManager : public SimpleConstraintManager{
+ RangeSet GetRange(GRStateRef state, SymbolRef sym);
+public:
+ RangeConstraintManager(GRStateManager& statemgr)
+ : SimpleConstraintManager(statemgr) {}
+
+ const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V, bool& isFeasible);
+
+ const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
+
+ // FIXME: Refactor into SimpleConstraintManager?
+ bool isEqual(const GRState* St, SymbolRef sym, const llvm::APSInt& V) const {
+ const llvm::APSInt *i = getSymVal(St, sym);
+ return i ? *i == V : false;
+ }
+
+ const GRState* RemoveDeadBindings(const GRState* St, SymbolReaper& SymReaper);
+
+ void print(const GRState* St, std::ostream& Out,
+ const char* nl, const char *sep);
+
+private:
+ RangeSet::Factory F;
+};
+
+} // end anonymous namespace
+
+ConstraintManager* clang::CreateRangeConstraintManager(GRStateManager& StateMgr)
+{
+ return new RangeConstraintManager(StateMgr);
+}
+
+const llvm::APSInt* RangeConstraintManager::getSymVal(const GRState* St,
+ SymbolRef sym) const {
+ const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+ return T ? T->getConcreteValue() : NULL;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is not known
+/// to be live by the SymbolReaper, remove its range constraints.
+const GRState*
+RangeConstraintManager::RemoveDeadBindings(const GRState* St,
+ SymbolReaper& SymReaper) {
+ GRStateRef state(St, StateMgr);
+
+ ConstraintRangeTy CR = state.get<ConstraintRange>();
+ ConstraintRangeTy::Factory& CRFactory = state.get_context<ConstraintRange>();
+
+ for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CR = CRFactory.Remove(CR, sym);
+ }
+
+ return state.set<ConstraintRange>(CR);
+}
+
+//===------------------------------------------------------------------------===//
+// GetRange: look up (or lazily create) the range set associated with a symbol.
+//===------------------------------------------------------------------------===//
+
+RangeSet
+RangeConstraintManager::GetRange(GRStateRef state, SymbolRef sym) {
+ if (ConstraintRangeTy::data_type* V = state.get<ConstraintRange>(sym))
+ return *V;
+
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ QualType T = state.getSymbolManager().getType(sym);
+ BasicValueFactory& BV = state.getBasicVals();
+ return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
+}
+
+//===------------------------------------------------------------------------===//
+// AssumeSymX methods: public interface for RangeConstraintManager.
+//===------------------------------------------------------------------------===//
+
+#define AssumeX(OP)\
+const GRState*\
+RangeConstraintManager::AssumeSym ## OP(const GRState* St, SymbolRef sym,\
+ const llvm::APSInt& V, bool& isFeasible){\
+ GRStateRef state(St, StateMgr);\
+ const RangeSet& R = GetRange(state, sym).Add##OP(state.getBasicVals(), F, V);\
+ isFeasible = !R.isEmpty();\
+ return isFeasible ? state.set<ConstraintRange>(sym, R).getState() : 0;\
+}
+
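+// The macro above stamps out the six AssumeSym* entry points below. Each one
+// intersects the symbol's current RangeSet with the new constraint (AddEQ,
+// AddNE, AddLT, ...), records the result in the ConstraintRange GDM entry, and
+// reports the path as infeasible when the resulting range set is empty.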
+AssumeX(EQ)
+AssumeX(NE)
+AssumeX(LT)
+AssumeX(GT)
+AssumeX(LE)
+AssumeX(GE)
+
+//===------------------------------------------------------------------------===//
+// Pretty-printing.
+//===------------------------------------------------------------------------===//
+
+void RangeConstraintManager::print(const GRState* St, std::ostream& Out,
+ const char* nl, const char *sep) {
+
+ ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+ if (Ranges.isEmpty())
+ return;
+
+ Out << nl << sep << "ranges of symbol values:";
+
+ for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+ Out << nl << ' ' << I.getKey() << " : ";
+ I.getData().Print(Out);
+ }
+}
diff --git a/lib/Analysis/RegionStore.cpp b/lib/Analysis/RegionStore.cpp
new file mode 100644
index 0000000..02d3d1f
--- /dev/null
+++ b/lib/Analysis/RegionStore.cpp
@@ -0,0 +1,1304 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. In this model we do have field
+// sensitivity, but we assume nothing about the heap shape, so recursive data
+// structures are largely ignored. Basically we do 1-limiting analysis.
+// Parameter pointers are assumed not to alias. Pointee objects of parameters
+// are created lazily.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/PathSensitive/MemRegion.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Analysis/PathSensitive/GRStateTrait.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Basic/TargetInfo.h"
+
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+// Actual Store type.
+typedef llvm::ImmutableMap<const MemRegion*, SVal> RegionBindingsTy;
+
+//===----------------------------------------------------------------------===//
+// Region "Views"
+//===----------------------------------------------------------------------===//
+//
+// MemRegions can be layered on top of each other. This GDM entry tracks
+// which MemRegions layer a given MemRegion.
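+// For example (roughly): casting a pointer to a different type can layer a
+// TypedViewRegion on top of the original region; this map records the views
+// that have been layered on a given region.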
+//
+typedef llvm::ImmutableSet<const MemRegion*> RegionViews;
+namespace { class VISIBILITY_HIDDEN RegionViewMap {}; }
+static int RegionViewMapIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionViewMap>
+ : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*,
+ RegionViews> > {
+
+ static void* GDMIndex() { return &RegionViewMapIndex; }
+ };
+}
+
+// RegionCasts records the current cast type of a region.
+namespace { class VISIBILITY_HIDDEN RegionCasts {}; }
+static int RegionCastsIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionCasts>
+ : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*,
+ QualType> > {
+ static void* GDMIndex() { return &RegionCastsIndex; }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Region "Extents"
+//===----------------------------------------------------------------------===//
+//
+// MemRegions represent chunks of memory with a size (their "extent"). This
+// GDM entry tracks the extents for regions. Extents are in bytes.
+//
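+// For illustration (hypothetical): for 'char buf[10];' the extent of the
+// corresponding region would be 10 bytes. Extents are recorded via
+// setExtent() below.
+//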
+namespace { class VISIBILITY_HIDDEN RegionExtents {}; }
+static int RegionExtentsIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionExtents>
+ : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
+ static void* GDMIndex() { return &RegionExtentsIndex; }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Region "killsets".
+//===----------------------------------------------------------------------===//
+//
+// RegionStore lazily adds value bindings to regions when the analyzer handles
+// assignment statements. Killsets track which default values have been
+// killed, thus distinguishing between "unknown" values and default values.
+// Regions are added to the killset only when they are assigned "unknown"
+// directly; otherwise their value is recorded in the region bindings.
+//
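+// For illustration (hypothetical analyzed code, not part of this change):
+//
+//   struct S s = { 0 };   // 's' gets a default value of 0
+//   s = *p;               // assigning an unknown value kills the struct, so
+//                         // its fields read back as "unknown" rather than 0
+//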
+namespace { class VISIBILITY_HIDDEN RegionKills {}; }
+static int RegionKillsIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionKills>
+ : public GRStatePartialTrait< llvm::ImmutableSet<const MemRegion*> > {
+ static void* GDMIndex() { return &RegionKillsIndex; }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Regions with default values.
+//===----------------------------------------------------------------------===//
+//
+// This GDM entry tracks which regions have a default value if they have no
+// bound value and have not been killed.
+//
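+// For illustration (hypothetical): after 'int a[100] = { 1 };' only a[0] gets
+// an explicit binding; the remaining elements share a default value of 0
+// recorded for the array region.
+//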
+namespace { class VISIBILITY_HIDDEN RegionDefaultValue {}; }
+static int RegionDefaultValueIndex = 0;
+namespace clang {
+ template<> struct GRStateTrait<RegionDefaultValue>
+ : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
+ static void* GDMIndex() { return &RegionDefaultValueIndex; }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Main RegionStore logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN RegionStoreSubRegionMap : public SubRegionMap {
+ typedef llvm::DenseMap<const MemRegion*,
+ llvm::ImmutableSet<const MemRegion*> > Map;
+
+ llvm::ImmutableSet<const MemRegion*>::Factory F;
+ Map M;
+
+public:
+  void add(const MemRegion* Parent, const MemRegion* SubRegion) {
+    // DenseMap::insert does not overwrite an existing entry, so update the
+    // set in place when the parent is already present.
+    Map::iterator I = M.find(Parent);
+    if (I == M.end())
+      M.insert(std::make_pair(Parent, F.Add(F.GetEmptySet(), SubRegion)));
+    else
+      I->second = F.Add(I->second, SubRegion);
+  }
+
+ ~RegionStoreSubRegionMap() {}
+
+ bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
+    Map::const_iterator I = M.find(Parent);
+
+ if (I == M.end())
+ return true;
+
+ llvm::ImmutableSet<const MemRegion*> S = I->second;
+ for (llvm::ImmutableSet<const MemRegion*>::iterator SI=S.begin(),SE=S.end();
+ SI != SE; ++SI) {
+ if (!V.Visit(Parent, *SI))
+ return false;
+ }
+
+ return true;
+ }
+};
+
+class VISIBILITY_HIDDEN RegionStoreManager : public StoreManager {
+ RegionBindingsTy::Factory RBFactory;
+ RegionViews::Factory RVFactory;
+
+ const MemRegion* SelfRegion;
+ const ImplicitParamDecl *SelfDecl;
+
+public:
+ RegionStoreManager(GRStateManager& mgr)
+ : StoreManager(mgr),
+ RBFactory(mgr.getAllocator()),
+ RVFactory(mgr.getAllocator()),
+ SelfRegion(0), SelfDecl(0) {
+ if (const ObjCMethodDecl* MD =
+ dyn_cast<ObjCMethodDecl>(&StateMgr.getCodeDecl()))
+ SelfDecl = MD->getSelfDecl();
+ }
+
+ virtual ~RegionStoreManager() {}
+
+ SubRegionMap* getSubRegionMap(const GRState *state);
+
+ const GRState* BindCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL, SVal V);
+
+ /// getLValueString - Returns an SVal representing the lvalue of a
+ /// StringLiteral. Within RegionStore a StringLiteral has an
+ /// associated StringRegion, and the lvalue of a StringLiteral is
+ /// the lvalue of that region.
+ SVal getLValueString(const GRState* St, const StringLiteral* S);
+
+ /// getLValueCompoundLiteral - Returns an SVal representing the
+ /// lvalue of a compound literal. Within RegionStore a compound
+ /// literal has an associated region, and the lvalue of the
+ /// compound literal is the lvalue of that region.
+ SVal getLValueCompoundLiteral(const GRState* St, const CompoundLiteralExpr*);
+
+ /// getLValueVar - Returns an SVal that represents the lvalue of a
+ /// variable. Within RegionStore a variable has an associated
+ /// VarRegion, and the lvalue of the variable is the lvalue of that region.
+ SVal getLValueVar(const GRState* St, const VarDecl* VD);
+
+ SVal getLValueIvar(const GRState* St, const ObjCIvarDecl* D, SVal Base);
+
+ SVal getLValueField(const GRState* St, SVal Base, const FieldDecl* D);
+
+ SVal getLValueFieldOrIvar(const GRState* St, SVal Base, const Decl* D);
+
+ SVal getLValueElement(const GRState* St, QualType elementType,
+ SVal Base, SVal Offset);
+
+ SVal getSizeInElements(const GRState* St, const MemRegion* R);
+
+ /// ArrayToPointer - Emulates the "decay" of an array to a pointer
+ /// type. 'Array' represents the lvalue of the array being decayed
+ /// to a pointer, and the returned SVal represents the decayed
+ /// version of that lvalue (i.e., a pointer to the first element of
+ /// the array). This is called by GRExprEngine when evaluating
+ /// casts from arrays to pointers.
+ SVal ArrayToPointer(Loc Array);
+
+ CastResult CastRegion(const GRState* state, const MemRegion* R,
+ QualType CastToTy);
+
+ SVal EvalBinOp(const GRState *state,BinaryOperator::Opcode Op,Loc L,NonLoc R);
+
+ /// The high level logic for this method is this:
+ /// Retrieve (L)
+ /// if L has binding
+ /// return L's binding
+ /// else if L is in killset
+ /// return unknown
+ /// else
+ /// if L is on stack or heap
+ /// return undefined
+ /// else
+ /// return symbolic
+ SVal Retrieve(const GRState* state, Loc L, QualType T = QualType());
+
+ const GRState* Bind(const GRState* St, Loc LV, SVal V);
+
+ Store Remove(Store store, Loc LV);
+
+ Store getInitialStore() { return RBFactory.GetEmptyMap().getRoot(); }
+
+  /// getSelfRegion - Returns the region for the 'self' (Objective-C) or
+  ///  'this' object (C++). When analyzing a normal function this method
+  ///  returns NULL.
+ const MemRegion* getSelfRegion(Store) {
+ if (!SelfDecl)
+ return 0;
+
+ if (!SelfRegion) {
+ const ObjCMethodDecl *MD = cast<ObjCMethodDecl>(&StateMgr.getCodeDecl());
+ SelfRegion = MRMgr.getObjCObjectRegion(MD->getClassInterface(),
+ MRMgr.getHeapRegion());
+ }
+
+ return SelfRegion;
+ }
+
+ /// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
+  ///  It returns a new Store with these values removed, and informs the
+  ///  SymbolReaper of which symbols are known to be live and which may be dead.
+ Store RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
+
+ const GRState* BindDecl(const GRState* St, const VarDecl* VD, SVal InitVal);
+
+ const GRState* BindDeclWithNoInit(const GRState* St, const VarDecl* VD) {
+ return St;
+ }
+
+ const GRState* setExtent(const GRState* St, const MemRegion* R, SVal Extent);
+ const GRState* setCastType(const GRState* St, const MemRegion* R, QualType T);
+
+ static inline RegionBindingsTy GetRegionBindings(Store store) {
+ return RegionBindingsTy(static_cast<const RegionBindingsTy::TreeTy*>(store));
+ }
+
+ void print(Store store, std::ostream& Out, const char* nl, const char *sep);
+
+ void iterBindings(Store store, BindingsHandler& f) {
+ // FIXME: Implement.
+ }
+ const GRState* setDefaultValue(const GRState* St, const MemRegion* R, SVal V);
+private:
+ const GRState* BindArray(const GRState* St, const TypedRegion* R, SVal V);
+
+ /// Retrieve the values in a struct and return a CompoundVal, used when doing
+ /// struct copy:
+ /// struct s x, y;
+ /// x = y;
+ /// y's value is retrieved by this method.
+ SVal RetrieveStruct(const GRState* St, const TypedRegion* R);
+
+ SVal RetrieveArray(const GRState* St, const TypedRegion* R);
+
+ const GRState* BindStruct(const GRState* St, const TypedRegion* R, SVal V);
+
+ /// KillStruct - Set the entire struct to unknown.
+ const GRState* KillStruct(const GRState* St, const TypedRegion* R);
+
+ // Utility methods.
+ BasicValueFactory& getBasicVals() { return StateMgr.getBasicVals(); }
+ ASTContext& getContext() { return StateMgr.getContext(); }
+
+ SymbolManager& getSymbolManager() { return StateMgr.getSymbolManager(); }
+
+ const GRState* AddRegionView(const GRState* St,
+ const MemRegion* View, const MemRegion* Base);
+ const GRState* RemoveRegionView(const GRState* St,
+ const MemRegion* View, const MemRegion* Base);
+};
+
+} // end anonymous namespace
+
+StoreManager* clang::CreateRegionStoreManager(GRStateManager& StMgr) {
+ return new RegionStoreManager(StMgr);
+}
+
+SubRegionMap* RegionStoreManager::getSubRegionMap(const GRState *state) {
+ RegionBindingsTy B = GetRegionBindings(state->getStore());
+ RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
+
+ for (RegionBindingsTy::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ if (const SubRegion* R = dyn_cast<SubRegion>(I.getKey()))
+ M->add(R->getSuperRegion(), R);
+ }
+
+ return M;
+}
+
+/// getLValueString - Returns an SVal representing the lvalue of a
+/// StringLiteral. Within RegionStore a StringLiteral has an
+/// associated StringRegion, and the lvalue of a StringLiteral is the
+/// lvalue of that region.
+SVal RegionStoreManager::getLValueString(const GRState* St,
+ const StringLiteral* S) {
+ return loc::MemRegionVal(MRMgr.getStringRegion(S));
+}
+
+/// getLValueVar - Returns an SVal that represents the lvalue of a
+/// variable. Within RegionStore a variable has an associated
+/// VarRegion, and the lvalue of the variable is the lvalue of that region.
+SVal RegionStoreManager::getLValueVar(const GRState* St, const VarDecl* VD) {
+ return loc::MemRegionVal(MRMgr.getVarRegion(VD));
+}
+
+/// getLValueCompoundLiteral - Returns an SVal representing the lvalue
+/// of a compound literal. Within RegionStore a compound literal
+/// has an associated region, and the lvalue of the compound literal
+/// is the lvalue of that region.
+SVal
+RegionStoreManager::getLValueCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL) {
+ return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL));
+}
+
+SVal RegionStoreManager::getLValueIvar(const GRState* St, const ObjCIvarDecl* D,
+ SVal Base) {
+ return getLValueFieldOrIvar(St, Base, D);
+}
+
+SVal RegionStoreManager::getLValueField(const GRState* St, SVal Base,
+ const FieldDecl* D) {
+ return getLValueFieldOrIvar(St, Base, D);
+}
+
+SVal RegionStoreManager::getLValueFieldOrIvar(const GRState* St, SVal Base,
+ const Decl* D) {
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+ const MemRegion* BaseR = 0;
+
+ switch (BaseL.getSubKind()) {
+ case loc::MemRegionKind:
+ BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+ break;
+
+ case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+ return UndefinedVal();
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ assert(0 && "Unhandled Base.");
+ return Base;
+ }
+
+ // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+ // of FieldDecl.
+ if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+ return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+ return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal RegionStoreManager::getLValueElement(const GRState* St,
+ QualType elementType,
+ SVal Base, SVal Offset) {
+
+ // If the base is an unknown or undefined value, just return it back.
+ // FIXME: For absolute pointer addresses, we just return that value back as
+ // well, although in reality we should return the offset added to that
+ // value.
+ if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
+ return Base;
+
+ // Only handle integer offsets... for now.
+ if (!isa<nonloc::ConcreteInt>(Offset))
+ return UnknownVal();
+
+ const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();
+
+  // A pointer of any type can be cast and used as an array base.
+ const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+ if (!ElemR) {
+ //
+ // If the base region is not an ElementRegion, create one.
+ // This can happen in the following example:
+ //
+    //   char *p = __builtin_alloca(10);
+ // p[1] = 8;
+ //
+ // Observe that 'p' binds to an AllocaRegion.
+ //
+
+ // Offset might be unsigned. We have to convert it to signed ConcreteInt.
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&Offset)) {
+ const llvm::APSInt& OffI = CI->getValue();
+ if (OffI.isUnsigned()) {
+ llvm::APSInt Tmp = OffI;
+ Tmp.setIsSigned(true);
+ Offset = NonLoc::MakeVal(getBasicVals(), Tmp);
+ }
+ }
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ BaseRegion));
+ }
+
+ SVal BaseIdx = ElemR->getIndex();
+
+ if (!isa<nonloc::ConcreteInt>(BaseIdx))
+ return UnknownVal();
+
+ const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
+ const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
+ assert(BaseIdxI.isSigned());
+
+  // FIXME: This appears to be an assumption of this code.  We should review
+  // whether BaseIdxI.getBitWidth() can ever be smaller than OffI.getBitWidth().
+  // If it cannot, we should document that here; if it can, we should handle it.
+ assert(BaseIdxI.getBitWidth() >= OffI.getBitWidth());
+
+ const MemRegion *ArrayR = ElemR->getSuperRegion();
+ SVal NewIdx;
+
+ if (OffI.isUnsigned() || OffI.getBitWidth() < BaseIdxI.getBitWidth()) {
+ // 'Offset' might be unsigned. We have to convert it to signed and
+ // possibly extend it.
+ llvm::APSInt Tmp = OffI;
+
+ if (OffI.getBitWidth() < BaseIdxI.getBitWidth())
+ Tmp.extend(BaseIdxI.getBitWidth());
+
+ Tmp.setIsSigned(true);
+ Tmp += BaseIdxI; // Compute the new offset.
+ NewIdx = NonLoc::MakeVal(getBasicVals(), Tmp);
+ }
+ else
+ NewIdx = nonloc::ConcreteInt(getBasicVals().getValue(BaseIdxI + OffI));
+
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR));
+}
+
+SVal RegionStoreManager::getSizeInElements(const GRState* St,
+ const MemRegion* R) {
+ if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
+ // Get the type of the variable.
+ QualType T = VR->getDesugaredValueType(getContext());
+
+ // FIXME: Handle variable-length arrays.
+ if (isa<VariableArrayType>(T))
+ return UnknownVal();
+
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
+      // Return the size as a signed integer.
+ return NonLoc::MakeVal(getBasicVals(), CAT->getSize(), false);
+ }
+
+ GRStateRef state(St, StateMgr);
+ const QualType* CastTy = state.get<RegionCasts>(VR);
+
+    // If the VarRegion is cast to another type, compute the size with respect
+    // to that type.
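+    // For example (hypothetical): 'int buf[2];' viewed through a 'char*' cast
+    // would have sizeof(int[2]) / sizeof(char) == 8 elements.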
+ if (CastTy) {
+ QualType EleTy =cast<PointerType>(CastTy->getTypePtr())->getPointeeType();
+ QualType VarTy = VR->getValueType(getContext());
+ uint64_t EleSize = getContext().getTypeSize(EleTy);
+ uint64_t VarSize = getContext().getTypeSize(VarTy);
+ return NonLoc::MakeIntVal(getBasicVals(), VarSize / EleSize, false);
+ }
+
+ // Clients can use ordinary variables as if they were arrays. These
+ // essentially are arrays of size 1.
+ return NonLoc::MakeIntVal(getBasicVals(), 1, false);
+ }
+
+ if (const StringRegion* SR = dyn_cast<StringRegion>(R)) {
+ const StringLiteral* Str = SR->getStringLiteral();
+ // We intentionally made the size value signed because it participates in
+ // operations with signed indices.
+ return NonLoc::MakeIntVal(getBasicVals(), Str->getByteLength()+1, false);
+ }
+
+ if (const FieldRegion* FR = dyn_cast<FieldRegion>(R)) {
+ // FIXME: Unsupported yet.
+ FR = 0;
+ return UnknownVal();
+ }
+
+ if (isa<SymbolicRegion>(R)) {
+ return UnknownVal();
+ }
+
+ if (isa<AllocaRegion>(R)) {
+ return UnknownVal();
+ }
+
+ if (isa<ElementRegion>(R)) {
+ return UnknownVal();
+ }
+
+ assert(0 && "Other regions are not supported yet.");
+ return UnknownVal();
+}
+
+/// ArrayToPointer - Emulates the "decay" of an array to a pointer
+/// type. 'Array' represents the lvalue of the array being decayed
+/// to a pointer, and the returned SVal represents the decayed
+/// version of that lvalue (i.e., a pointer to the first element of
+/// the array). This is called by GRExprEngine when evaluating casts
+/// from arrays to pointers.
+SVal RegionStoreManager::ArrayToPointer(Loc Array) {
+ if (!isa<loc::MemRegionVal>(Array))
+ return UnknownVal();
+
+ const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion();
+ const TypedRegion* ArrayR = dyn_cast<TypedRegion>(R);
+
+ if (!ArrayR)
+ return UnknownVal();
+
+ // Strip off typedefs from the ArrayRegion's ValueType.
+ QualType T = ArrayR->getValueType(getContext())->getDesugaredType();
+ ArrayType *AT = cast<ArrayType>(T);
+ T = AT->getElementType();
+
+ nonloc::ConcreteInt Idx(getBasicVals().getZeroWithPtrWidth(false));
+ ElementRegion* ER = MRMgr.getElementRegion(T, Idx, ArrayR);
+
+ return loc::MemRegionVal(ER);
+}
+
+RegionStoreManager::CastResult
+RegionStoreManager::CastRegion(const GRState* state, const MemRegion* R,
+ QualType CastToTy) {
+
+ ASTContext& Ctx = StateMgr.getContext();
+
+ // We need to know the real type of CastToTy.
+ QualType ToTy = Ctx.getCanonicalType(CastToTy);
+
+ // Check cast to ObjCQualifiedID type.
+ if (isa<ObjCQualifiedIdType>(ToTy)) {
+ // FIXME: Record the type information aside.
+ return CastResult(state, R);
+ }
+
+  // A CodeTextRegion should only be cast to a function or block pointer type.
+ if (isa<CodeTextRegion>(R)) {
+ assert(CastToTy->isFunctionPointerType() || CastToTy->isBlockPointerType());
+ return CastResult(state, R);
+ }
+
+ // Now assume we are casting from pointer to pointer. Other cases should
+ // already be handled.
+ QualType PointeeTy = cast<PointerType>(ToTy.getTypePtr())->getPointeeType();
+
+ // Process region cast according to the kind of the region being cast.
+
+ // FIXME: Need to handle arbitrary downcasts.
+ if (isa<SymbolicRegion>(R) || isa<AllocaRegion>(R)) {
+ state = setCastType(state, R, ToTy);
+ return CastResult(state, R);
+ }
+
+  // VarRegion, ElementRegion, and FieldRegion have an inherent type. Normally
+  // they should not be cast. We only layer an ElementRegion when the cast-to
+  // pointee type is of smaller size. In other cases, we return the original
+  // region.
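+  // For example (hypothetical): casting the region of 'int x;' to 'char*'
+  // layers a char ElementRegion (index 0) on top of the VarRegion for 'x'.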
+ if (isa<VarRegion>(R) || isa<ElementRegion>(R) || isa<FieldRegion>(R)
+ || isa<ObjCIvarRegion>(R) || isa<CompoundLiteralRegion>(R)) {
+ // If the pointee type is incomplete, do not compute its size, and return
+ // the original region.
+ if (const RecordType *RT = dyn_cast<RecordType>(PointeeTy.getTypePtr())) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition(getContext()))
+ return CastResult(state, R);
+ }
+
+ QualType ObjTy = cast<TypedRegion>(R)->getValueType(getContext());
+ uint64_t PointeeTySize = getContext().getTypeSize(PointeeTy);
+ uint64_t ObjTySize = getContext().getTypeSize(ObjTy);
+
+ if ((PointeeTySize > 0 && PointeeTySize < ObjTySize) ||
+ (ObjTy->isAggregateType() && PointeeTy->isScalarType())) {
+ // Record the cast type of the region.
+ state = setCastType(state, R, ToTy);
+
+ SVal Idx = ValMgr.makeZeroArrayIndex();
+ ElementRegion* ER = MRMgr.getElementRegion(PointeeTy, Idx, R);
+ return CastResult(state, ER);
+ } else
+ return CastResult(state, R);
+ }
+
+ if (isa<ObjCObjectRegion>(R)) {
+ return CastResult(state, R);
+ }
+
+ assert(0 && "Unprocessed region.");
+ return 0;
+}
+
+SVal RegionStoreManager::EvalBinOp(const GRState *state,
+ BinaryOperator::Opcode Op, Loc L, NonLoc R) {
+ // Assume the base location is MemRegionVal.
+ if (!isa<loc::MemRegionVal>(L))
+ return UnknownVal();
+
+ const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
+ const ElementRegion *ER = 0;
+
+ // If the operand is a symbolic or alloca region, create the first element
+ // region on it.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR)) {
+ // Get symbol's type. It should be a pointer type.
+ SymbolRef Sym = SR->getSymbol();
+ QualType T = Sym->getType(getContext());
+ QualType EleTy = cast<PointerType>(T.getTypePtr())->getPointeeType();
+
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, SR);
+ }
+ else if (const AllocaRegion *AR = dyn_cast<AllocaRegion>(MR)) {
+ // Get the alloca region's current cast type.
+ GRStateRef StRef(state, StateMgr);
+
+ GRStateTrait<RegionCasts>::lookup_type T = StRef.get<RegionCasts>(AR);
+ assert(T && "alloca region has no type.");
+ QualType EleTy = cast<PointerType>(T->getTypePtr())->getPointeeType();
+ SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
+ ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR);
+ }
+ else
+ ER = cast<ElementRegion>(MR);
+
+ SVal Idx = ER->getIndex();
+
+ nonloc::ConcreteInt* Base = dyn_cast<nonloc::ConcreteInt>(&Idx);
+ nonloc::ConcreteInt* Offset = dyn_cast<nonloc::ConcreteInt>(&R);
+
+ // Only support concrete integer indexes for now.
+ if (Base && Offset) {
+ // FIXME: For now, convert the signedness and bitwidth of offset in case
+ // they don't match. This can result from pointer arithmetic. In reality,
+ // we should figure out what are the proper semantics and implement them.
+ //
+ // This addresses the test case test/Analysis/ptr-arith.c
+ //
+ nonloc::ConcreteInt OffConverted(getBasicVals().Convert(Base->getValue(),
+ Offset->getValue()));
+ SVal NewIdx = Base->EvalBinOp(getBasicVals(), Op, OffConverted);
+ const MemRegion* NewER =
+ MRMgr.getElementRegion(ER->getElementType(), NewIdx,ER->getSuperRegion());
+ return Loc::MakeVal(NewER);
+
+ }
+
+ return UnknownVal();
+}
+
+SVal RegionStoreManager::Retrieve(const GRState* St, Loc L, QualType T) {
+ assert(!isa<UnknownVal>(L) && "location unknown");
+ assert(!isa<UndefinedVal>(L) && "location undefined");
+
+ // FIXME: Is this even possible? Shouldn't this be treated as a null
+ // dereference at a higher level?
+ if (isa<loc::ConcreteInt>(L))
+ return UndefinedVal();
+
+ const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
+
+ // FIXME: return symbolic value for these cases.
+ // Example:
+ // void f(int* p) { int x = *p; }
+ // char* p = alloca();
+ // read(p);
+ // c = *p;
+ if (isa<SymbolicRegion>(MR) || isa<AllocaRegion>(MR))
+ return UnknownVal();
+
+ // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+ // instead of 'Loc', and have the other Loc cases handled at a higher level.
+ const TypedRegion* R = cast<TypedRegion>(MR);
+ assert(R && "bad region");
+
+ // FIXME: We should eventually handle funny addressing. e.g.:
+ //
+ // int x = ...;
+ // int *p = &x;
+ // char *q = (char*) p;
+ // char c = *q; // returns the first byte of 'x'.
+ //
+ // Such funny addressing will occur due to layering of regions.
+
+ QualType RTy = R->getValueType(getContext());
+
+ if (RTy->isStructureType())
+ return RetrieveStruct(St, R);
+
+ if (RTy->isArrayType())
+ return RetrieveArray(St, R);
+
+ // FIXME: handle Vector types.
+ if (RTy->isVectorType())
+ return UnknownVal();
+
+ RegionBindingsTy B = GetRegionBindings(St->getStore());
+ RegionBindingsTy::data_type* V = B.lookup(R);
+
+ // Check if the region has a binding.
+ if (V)
+ return *V;
+
+ GRStateRef state(St, StateMgr);
+
+ // Check if the region is in killset.
+ if (state.contains<RegionKills>(R))
+ return UnknownVal();
+
+ // Check if the region is an element region of a string literal.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (const StringRegion *StrR=dyn_cast<StringRegion>(ER->getSuperRegion())) {
+ const StringLiteral *Str = StrR->getStringLiteral();
+ SVal Idx = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
+ int64_t i = CI->getValue().getSExtValue();
+ char c;
+ if (i == Str->getByteLength())
+ c = '\0';
+ else
+ c = Str->getStrData()[i];
+ const llvm::APSInt &V = getBasicVals().getValue(c, getContext().CharTy);
+ return nonloc::ConcreteInt(V);
+ }
+ }
+ }
+
+ // If the region is an element or field, it may have a default value.
+ if (isa<ElementRegion>(R) || isa<FieldRegion>(R)) {
+ const MemRegion* SuperR = cast<SubRegion>(R)->getSuperRegion();
+ GRStateTrait<RegionDefaultValue>::lookup_type D =
+ state.get<RegionDefaultValue>(SuperR);
+ if (D) {
+ // If the default value is symbolic, we need to create a new symbol.
+ if (D->hasConjuredSymbol())
+ return ValMgr.getRegionValueSymbolVal(R);
+ else
+ return *D;
+ }
+ }
+
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+ const MemRegion *SR = IVR->getSuperRegion();
+
+ // If the super region is 'self' then return the symbol representing
+ // the value of the ivar upon entry to the method.
+ if (SR == SelfRegion) {
+ // FIXME: Do we need to handle the case where the super region
+ // has a view? We want to canonicalize the bindings.
+ return ValMgr.getRegionValueSymbolVal(R);
+ }
+
+ // Otherwise, we need a new symbol. For now return Unknown.
+ return UnknownVal();
+ }
+
+ // The location does not have a bound value. This means that it has
+ // the value it had upon its creation and/or entry to the analyzed
+ // function/method. These are either symbolic values or 'undefined'.
+
+ // We treat function parameters as symbolic values.
+ if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
+ const VarDecl *VD = VR->getDecl();
+
+ if (VD == SelfDecl)
+ return loc::MemRegionVal(getSelfRegion(0));
+
+ if (isa<ParmVarDecl>(VD) || isa<ImplicitParamDecl>(VD) ||
+ VD->hasGlobalStorage()) {
+ QualType VTy = VD->getType();
+ if (Loc::IsLocType(VTy) || VTy->isIntegerType())
+ return ValMgr.getRegionValueSymbolVal(VR);
+ else
+ return UnknownVal();
+ }
+ }
+
+ if (MRMgr.onStack(R) || MRMgr.onHeap(R)) {
+ // All stack variables are considered to have undefined values
+ // upon creation. All heap allocated blocks are considered to
+ // have undefined values as well unless they are explicitly bound
+ // to specific values.
+ return UndefinedVal();
+ }
+
+ // All other integer values are symbolic.
+ if (Loc::IsLocType(RTy) || RTy->isIntegerType())
+ return ValMgr.getRegionValueSymbolVal(R);
+ else
+ return UnknownVal();
+}
+
+SVal RegionStoreManager::RetrieveStruct(const GRState* St,const TypedRegion* R){
+ QualType T = R->getValueType(getContext());
+ assert(T->isStructureType());
+
+ const RecordType* RT = cast<RecordType>(T.getTypePtr());
+ RecordDecl* RD = RT->getDecl();
+ assert(RD->isDefinition());
+
+ llvm::ImmutableList<SVal> StructVal = getBasicVals().getEmptySValList();
+
+ std::vector<FieldDecl *> Fields(RD->field_begin(getContext()),
+ RD->field_end(getContext()));
+
+ for (std::vector<FieldDecl *>::reverse_iterator Field = Fields.rbegin(),
+ FieldEnd = Fields.rend();
+ Field != FieldEnd; ++Field) {
+ FieldRegion* FR = MRMgr.getFieldRegion(*Field, R);
+ QualType FTy = (*Field)->getType();
+ SVal FieldValue = Retrieve(St, loc::MemRegionVal(FR), FTy);
+ StructVal = getBasicVals().consVals(FieldValue, StructVal);
+ }
+
+ return NonLoc::MakeCompoundVal(T, StructVal, getBasicVals());
+}
+
+SVal RegionStoreManager::RetrieveArray(const GRState* St, const TypedRegion* R){
+ QualType T = R->getValueType(getContext());
+ ConstantArrayType* CAT = cast<ConstantArrayType>(T.getTypePtr());
+
+ llvm::ImmutableList<SVal> ArrayVal = getBasicVals().getEmptySValList();
+ llvm::APSInt Size(CAT->getSize(), false);
+ llvm::APSInt i = getBasicVals().getZeroWithPtrWidth(false);
+
+ for (; i < Size; ++i) {
+ SVal Idx = NonLoc::MakeVal(getBasicVals(), i);
+ ElementRegion* ER = MRMgr.getElementRegion(CAT->getElementType(), Idx, R);
+ QualType ETy = ER->getElementType();
+ SVal ElementVal = Retrieve(St, loc::MemRegionVal(ER), ETy);
+ ArrayVal = getBasicVals().consVals(ElementVal, ArrayVal);
+ }
+
+ return NonLoc::MakeCompoundVal(T, ArrayVal, getBasicVals());
+}
+
+const GRState* RegionStoreManager::Bind(const GRState* St, Loc L, SVal V) {
+ // If we get here, the location should be a region.
+ const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion();
+ assert(R);
+
+ // Check if the region is a struct region.
+ if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
+ if (TR->getValueType(getContext())->isStructureType())
+ return BindStruct(St, TR, V);
+
+ Store store = St->getStore();
+ RegionBindingsTy B = GetRegionBindings(store);
+
+ if (V.isUnknown()) {
+ // Remove the binding.
+ store = RBFactory.Remove(B, R).getRoot();
+
+ // Add the region to the killset.
+ GRStateRef state(St, StateMgr);
+ St = state.add<RegionKills>(R);
+ }
+ else
+ store = RBFactory.Add(B, R, V).getRoot();
+
+ return StateMgr.MakeStateWithStore(St, store);
+}
+
+Store RegionStoreManager::Remove(Store store, Loc L) {
+ const MemRegion* R = 0;
+
+ if (isa<loc::MemRegionVal>(L))
+ R = cast<loc::MemRegionVal>(L).getRegion();
+
+ if (R) {
+ RegionBindingsTy B = GetRegionBindings(store);
+ return RBFactory.Remove(B, R).getRoot();
+ }
+
+ return store;
+}
+
+const GRState* RegionStoreManager::BindDecl(const GRState* St,
+ const VarDecl* VD, SVal InitVal) {
+
+ QualType T = VD->getType();
+ VarRegion* VR = MRMgr.getVarRegion(VD);
+
+ if (T->isArrayType())
+ return BindArray(St, VR, InitVal);
+ if (T->isStructureType())
+ return BindStruct(St, VR, InitVal);
+
+ return Bind(St, Loc::MakeVal(VR), InitVal);
+}
+
+// FIXME: this method should be merged into Bind().
+const GRState*
+RegionStoreManager::BindCompoundLiteral(const GRState* St,
+ const CompoundLiteralExpr* CL, SVal V) {
+ CompoundLiteralRegion* R = MRMgr.getCompoundLiteralRegion(CL);
+ return Bind(St, loc::MemRegionVal(R), V);
+}
+
+const GRState* RegionStoreManager::setExtent(const GRState* St,
+ const MemRegion* R, SVal Extent) {
+ GRStateRef state(St, StateMgr);
+ return state.set<RegionExtents>(R, Extent);
+}
+
+
+static void UpdateLiveSymbols(SVal X, SymbolReaper& SymReaper) {
+ if (loc::MemRegionVal *XR = dyn_cast<loc::MemRegionVal>(&X)) {
+ const MemRegion *R = XR->getRegion();
+
+ while (R) {
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ SymReaper.markLive(SR->getSymbol());
+ return;
+ }
+
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ R = SR->getSuperRegion();
+ continue;
+ }
+
+ break;
+ }
+
+ return;
+ }
+
+ for (SVal::symbol_iterator SI=X.symbol_begin(), SE=X.symbol_end();SI!=SE;++SI)
+ SymReaper.markLive(*SI);
+}
+
+Store RegionStoreManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+ SymbolReaper& SymReaper,
+ llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
+{
+
+ Store store = state->getStore();
+ RegionBindingsTy B = GetRegionBindings(store);
+
+ // Lazily constructed backmap from MemRegions to SubRegions.
+ typedef llvm::ImmutableSet<const MemRegion*> SubRegionsTy;
+ typedef llvm::ImmutableMap<const MemRegion*, SubRegionsTy> SubRegionsMapTy;
+
+  // FIXME: As a future optimization we can modify BumpPtrAllocator to have
+ // the ability to reuse memory. This way we can keep TmpAlloc around as
+ // an instance variable of RegionStoreManager (avoiding repeated malloc
+ // overhead).
+ llvm::BumpPtrAllocator TmpAlloc;
+
+ // Factory objects.
+ SubRegionsMapTy::Factory SubRegMapF(TmpAlloc);
+ SubRegionsTy::Factory SubRegF(TmpAlloc);
+
+ // The backmap from regions to subregions.
+ SubRegionsMapTy SubRegMap = SubRegMapF.GetEmptyMap();
+
+ // Do a pass over the regions in the store. For VarRegions we check if
+ // the variable is still live and if so add it to the list of live roots.
+ // For other regions we populate our region backmap.
+
+ llvm::SmallVector<const MemRegion*, 10> IntermediateRoots;
+
+ for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ IntermediateRoots.push_back(I.getKey());
+ }
+
+ while (!IntermediateRoots.empty()) {
+ const MemRegion* R = IntermediateRoots.back();
+ IntermediateRoots.pop_back();
+
+ if (const VarRegion* VR = dyn_cast<VarRegion>(R)) {
+ if (SymReaper.isLive(Loc, VR->getDecl()))
+ RegionRoots.push_back(VR); // This is a live "root".
+ }
+ else if (const SymbolicRegion* SR = dyn_cast<SymbolicRegion>(R)) {
+ if (SymReaper.isLive(SR->getSymbol()))
+ RegionRoots.push_back(SR);
+ }
+ else {
+ // Get the super region for R.
+ const MemRegion* SuperR = cast<SubRegion>(R)->getSuperRegion();
+
+ // Get the current set of subregions for SuperR.
+ const SubRegionsTy* SRptr = SubRegMap.lookup(SuperR);
+ SubRegionsTy SRs = SRptr ? *SRptr : SubRegF.GetEmptySet();
+
+ // Add R to the subregions of SuperR.
+ SubRegMap = SubRegMapF.Add(SubRegMap, SuperR, SubRegF.Add(SRs, R));
+
+      // The super region may be a VarRegion or a subregion of another
+      // VarRegion. Add it to the worklist.
+ if (isa<SubRegion>(SuperR))
+ IntermediateRoots.push_back(SuperR);
+ }
+ }
+
+ // Process the worklist of RegionRoots. This performs a "mark-and-sweep"
+ // of the store. We want to find all live symbols and dead regions.
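+  // For illustration (hypothetical): given 'int x; int *p = &x;', the binding
+  // for 'p' holds the region of 'x', so processing 'p' as a root also marks
+  // the region of 'x' and its binding survives the sweep below.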
+ llvm::SmallPtrSet<const MemRegion*, 10> Marked;
+
+ while (!RegionRoots.empty()) {
+ // Dequeue the next region on the worklist.
+ const MemRegion* R = RegionRoots.back();
+ RegionRoots.pop_back();
+
+ // Check if we have already processed this region.
+ if (Marked.count(R)) continue;
+
+ // Mark this region as processed. This is needed for termination in case
+ // a region is referenced more than once.
+ Marked.insert(R);
+
+ // Mark the symbol for any live SymbolicRegion as "live". This means we
+ // should continue to track that symbol.
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
+ SymReaper.markLive(SymR->getSymbol());
+
+ // Get the data binding for R (if any).
+ RegionBindingsTy::data_type* Xptr = B.lookup(R);
+ if (Xptr) {
+ SVal X = *Xptr;
+ UpdateLiveSymbols(X, SymReaper); // Update the set of live symbols.
+
+      // If X is a region, then add it to RegionRoots.
+ if (loc::MemRegionVal* RegionX = dyn_cast<loc::MemRegionVal>(&X))
+ RegionRoots.push_back(RegionX->getRegion());
+ }
+
+ // Get the subregions of R. These are RegionRoots as well since they
+ // represent values that are also bound to R.
+ const SubRegionsTy* SRptr = SubRegMap.lookup(R);
+ if (!SRptr) continue;
+ SubRegionsTy SR = *SRptr;
+
+ for (SubRegionsTy::iterator I=SR.begin(), E=SR.end(); I!=E; ++I)
+ RegionRoots.push_back(*I);
+ }
+
+  // We have now scanned the store, marking reachable regions and symbols
+  // as live. We now remove all the regions that are dead from the store
+  // and report to the SymbolReaper the symbols that may now be dead.
+ for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const MemRegion* R = I.getKey();
+
+    // Is this region live? If so, none of its symbols are dead.
+ if (Marked.count(R))
+ continue;
+
+ // Remove this dead region from the store.
+ store = Remove(store, Loc::MakeVal(R));
+
+ // Mark all non-live symbols that this region references as dead.
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(R))
+ SymReaper.maybeDead(SymR->getSymbol());
+
+ SVal X = I.getData();
+ SVal::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI) SymReaper.maybeDead(*SI);
+ }
+
+ return store;
+}
+
+void RegionStoreManager::print(Store store, std::ostream& Out,
+ const char* nl, const char *sep) {
+ llvm::raw_os_ostream OS(Out);
+ RegionBindingsTy B = GetRegionBindings(store);
+ OS << "Store:" << nl;
+
+ for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ OS << ' '; I.getKey()->print(OS); OS << " : ";
+ I.getData().print(OS); OS << nl;
+ }
+}
+
+const GRState* RegionStoreManager::BindArray(const GRState* St,
+ const TypedRegion* R, SVal Init) {
+ QualType T = R->getValueType(getContext());
+ assert(T->isArrayType());
+
+  // When we are binding the whole array, it always has a default value of 0.
+ GRStateRef state(St, StateMgr);
+ St = state.set<RegionDefaultValue>(R, NonLoc::MakeIntVal(getBasicVals(), 0,
+ false));
+
+ ConstantArrayType* CAT = cast<ConstantArrayType>(T.getTypePtr());
+
+ llvm::APSInt Size(CAT->getSize(), false);
+ llvm::APSInt i = getBasicVals().getValue(0, Size.getBitWidth(),
+ Size.isUnsigned());
+
+ // Check if the init expr is a StringLiteral.
+ if (isa<loc::MemRegionVal>(Init)) {
+ const MemRegion* InitR = cast<loc::MemRegionVal>(Init).getRegion();
+ const StringLiteral* S = cast<StringRegion>(InitR)->getStringLiteral();
+ const char* str = S->getStrData();
+ unsigned len = S->getByteLength();
+ unsigned j = 0;
+
+ // Copy bytes from the string literal into the target array. Trailing bytes
+ // in the array that are not covered by the string literal are initialized
+ // to zero.
+ for (; i < Size; ++i, ++j) {
+ if (j >= len)
+ break;
+
+ SVal Idx = NonLoc::MakeVal(getBasicVals(), i);
+ ElementRegion* ER =
+ MRMgr.getElementRegion(cast<ArrayType>(T)->getElementType(),
+ Idx, R);
+
+ SVal V = NonLoc::MakeVal(getBasicVals(), str[j], sizeof(char)*8, true);
+ St = Bind(St, loc::MemRegionVal(ER), V);
+ }
+
+ return St;
+ }
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
+ for (; i < Size; ++i, ++VI) {
+ // The init list might be shorter than the array decl.
+ if (VI == VE)
+ break;
+
+ SVal Idx = NonLoc::MakeVal(getBasicVals(), i);
+ ElementRegion* ER =
+ MRMgr.getElementRegion(cast<ArrayType>(T)->getElementType(),
+ Idx, R);
+
+ if (CAT->getElementType()->isStructureType())
+ St = BindStruct(St, ER, *VI);
+ else
+ St = Bind(St, Loc::MakeVal(ER), *VI);
+ }
+
+ return St;
+}
+
+const GRState*
+RegionStoreManager::BindStruct(const GRState* St, const TypedRegion* R, SVal V){
+ QualType T = R->getValueType(getContext());
+ assert(T->isStructureType());
+
+ const RecordType* RT = T->getAsRecordType();
+ RecordDecl* RD = RT->getDecl();
+
+ if (!RD->isDefinition())
+ return St;
+
+ if (V.isUnknown())
+ return KillStruct(St, R);
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ RecordDecl::field_iterator FI = RD->field_begin(getContext()),
+ FE = RD->field_end(getContext());
+
+ for (; FI != FE; ++FI, ++VI) {
+
+    // There may be fewer values than fields only when we are initializing a
+    // struct decl. In this case, mark the region as having a default value of 0.
+ if (VI == VE) {
+ GRStateRef state(St, StateMgr);
+ const NonLoc& Idx = NonLoc::MakeIntVal(getBasicVals(), 0, false);
+ St = state.set<RegionDefaultValue>(R, Idx);
+ break;
+ }
+
+ QualType FTy = (*FI)->getType();
+ FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+ if (Loc::IsLocType(FTy) || FTy->isIntegerType())
+ St = Bind(St, Loc::MakeVal(FR), *VI);
+
+ else if (FTy->isArrayType())
+ St = BindArray(St, FR, *VI);
+
+ else if (FTy->isStructureType())
+ St = BindStruct(St, FR, *VI);
+ }
+
+ return St;
+}
+
+const GRState* RegionStoreManager::KillStruct(const GRState* St,
+ const TypedRegion* R){
+ GRStateRef state(St, StateMgr);
+
+ // Kill the struct region because it is assigned "unknown".
+ St = state.add<RegionKills>(R);
+
+ // Set the default value of the struct region to "unknown".
+ St = state.set<RegionDefaultValue>(R, UnknownVal());
+
+ Store store = St->getStore();
+ RegionBindingsTy B = GetRegionBindings(store);
+
+ // Remove all bindings for the subregions of the struct.
+ for (RegionBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const MemRegion* r = I.getKey();
+ if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ if (sr->isSubRegionOf(R))
+ store = Remove(store, Loc::MakeVal(sr));
+ // FIXME: Maybe we should also remove the bindings for the "views" of the
+ // subregions.
+ }
+
+ return StateMgr.MakeStateWithStore(St, store);
+}
+
+const GRState* RegionStoreManager::AddRegionView(const GRState* St,
+ const MemRegion* View,
+ const MemRegion* Base) {
+ GRStateRef state(St, StateMgr);
+
+ // First, retrieve the region view of the base region.
+ const RegionViews* d = state.get<RegionViewMap>(Base);
+ RegionViews L = d ? *d : RVFactory.GetEmptySet();
+
+ // Now add View to the region view.
+ L = RVFactory.Add(L, View);
+
+ // Create a new state with the new region view.
+ return state.set<RegionViewMap>(Base, L);
+}
+
+const GRState* RegionStoreManager::RemoveRegionView(const GRState* St,
+ const MemRegion* View,
+ const MemRegion* Base) {
+ GRStateRef state(St, StateMgr);
+
+ // Retrieve the region view of the base region.
+ const RegionViews* d = state.get<RegionViewMap>(Base);
+
+ // If the base region has no view, return.
+ if (!d)
+ return St;
+
+ // Remove the view.
+ RegionViews V = *d;
+ V = RVFactory.Remove(V, View);
+
+ return state.set<RegionViewMap>(Base, V);
+}
+
+const GRState* RegionStoreManager::setCastType(const GRState* St,
+ const MemRegion* R, QualType T) {
+ GRStateRef state(St, StateMgr);
+ return state.set<RegionCasts>(R, T);
+}
+
+const GRState* RegionStoreManager::setDefaultValue(const GRState* St,
+ const MemRegion* R, SVal V) {
+ GRStateRef state(St, StateMgr);
+ return state.set<RegionDefaultValue>(R, V);
+}
diff --git a/lib/Analysis/SVals.cpp b/lib/Analysis/SVals.cpp
new file mode 100644
index 0000000..e19b168
--- /dev/null
+++ b/lib/Analysis/SVals.cpp
@@ -0,0 +1,513 @@
+//= SVals.cpp - Abstract RValues for Path-Sens. Value Tracking ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/GRState.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/Streams.h"
+
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::cast;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+ if (const nonloc::SymbolVal* SV = dyn_cast<nonloc::SymbolVal>(this)) {
+ SymbolRef sym = SV->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+
+ if (const loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = RV->getRegion();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ SymbolRef sym = SR->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ } else if (const CodeTextRegion *CTR = dyn_cast<CodeTextRegion>(R)) {
+ if (CTR->isSymbolic()) {
+ SymbolRef sym = CTR->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+const FunctionDecl* SVal::getAsFunctionDecl() const {
+ if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion* R = X->getRegion();
+ if (const CodeTextRegion* CTR = R->getAs<CodeTextRegion>()) {
+ if (CTR->isDeclared())
+ return CTR->getDecl();
+ }
+ }
+
+ return 0;
+}
+
+/// getAsLocSymbol - If this SVal is a location (subclasses Loc) and
+/// wraps a symbol, return that SymbolRef. Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+SymbolRef SVal::getAsLocSymbol() const {
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = X->getRegion();
+
+ while (R) {
+ // Blast through region views.
+ if (const TypedViewRegion *View = dyn_cast<TypedViewRegion>(R)) {
+ R = View->getSuperRegion();
+ continue;
+ }
+
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ return SymR->getSymbol();
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/// getAsSymbol - If this SVal wraps a symbol, return that SymbolRef.
+/// Otherwise return 0.
+// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+SymbolRef SVal::getAsSymbol() const {
+ if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+ return X->getSymbol();
+
+ if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+ if (SymbolRef Y = dyn_cast<SymbolData>(X->getSymbolicExpression()))
+ return Y;
+
+ return getAsLocSymbol();
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
+/// return that expression. Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+ if (const nonloc::SymExprVal *X = dyn_cast<nonloc::SymExprVal>(this))
+ return X->getSymbolicExpression();
+
+ return getAsSymbol();
+}
+
+bool SVal::symbol_iterator::operator==(const symbol_iterator &X) const {
+ return itr == X.itr;
+}
+
+bool SVal::symbol_iterator::operator!=(const symbol_iterator &X) const {
+ return itr != X.itr;
+}
+
+SVal::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+ itr.push_back(SE);
+ while (!isa<SymbolData>(itr.back())) expand();
+}
+
+SVal::symbol_iterator& SVal::symbol_iterator::operator++() {
+ assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+ assert(isa<SymbolData>(itr.back()));
+ itr.pop_back();
+ if (!itr.empty())
+ while (!isa<SymbolData>(itr.back())) expand();
+ return *this;
+}
+
+SymbolRef SVal::symbol_iterator::operator*() {
+ assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+ return cast<SymbolData>(itr.back());
+}
+
+void SVal::symbol_iterator::expand() {
+ const SymExpr *SE = itr.back();
+ itr.pop_back();
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ itr.push_back(SIE->getLHS());
+ return;
+ }
+ else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
+ itr.push_back(SSE->getLHS());
+ itr.push_back(SSE->getRHS());
+ return;
+ }
+
+ assert(false && "unhandled expansion case");
+}
+
+//===----------------------------------------------------------------------===//
+// Other Iterators.
+//===----------------------------------------------------------------------===//
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const {
+ return getValue()->begin();
+}
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
+ return getValue()->end();
+}
+
+//===----------------------------------------------------------------------===//
+// Useful predicates.
+//===----------------------------------------------------------------------===//
+
+bool SVal::isZeroConstant() const {
+ if (isa<loc::ConcreteInt>(*this))
+ return cast<loc::ConcreteInt>(*this).getValue() == 0;
+ else if (isa<nonloc::ConcreteInt>(*this))
+ return cast<nonloc::ConcreteInt>(*this).getValue() == 0;
+ else
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Non-Locs.
+//===----------------------------------------------------------------------===//
+
+SVal nonloc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
+ BinaryOperator::Opcode Op,
+ const nonloc::ConcreteInt& R) const {
+
+ const llvm::APSInt* X =
+ BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return nonloc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+ // Bitwise-Complement.
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::EvalComplement(BasicValueFactory& BasicVals) const {
+ return BasicVals.getValue(~getValue());
+}
+
+ // Unary Minus.
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::EvalMinus(BasicValueFactory& BasicVals, UnaryOperator* U) const {
+ assert (U->getType() == U->getSubExpr()->getType());
+ assert (U->getType()->isIntegerType());
+ return BasicVals.getValue(-getValue());
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Locs.
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::EvalBinOp(BasicValueFactory& BasicVals,
+ BinaryOperator::Opcode Op,
+ const loc::ConcreteInt& R) const {
+
+ assert (Op == BinaryOperator::Add || Op == BinaryOperator::Sub ||
+ (Op >= BinaryOperator::LT && Op <= BinaryOperator::NE));
+
+ const llvm::APSInt* X = BasicVals.EvaluateAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return loc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods for constructing SVals.
+//===----------------------------------------------------------------------===//
+
+SVal ValueManager::makeZeroVal(QualType T) {
+ if (Loc::IsLocType(T))
+ return Loc::MakeNull(BasicVals);
+
+ if (T->isIntegerType())
+ return NonLoc::MakeVal(BasicVals, 0, T);
+
+ // FIXME: Handle floats.
+ // FIXME: Handle structs.
+ return UnknownVal();
+}
+
+SVal ValueManager::makeZeroArrayIndex() {
+ return nonloc::ConcreteInt(BasicVals.getZeroWithPtrWidth(false));
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods for constructing Non-Locs.
+//===----------------------------------------------------------------------===//
+
+NonLoc ValueManager::makeNonLoc(SymbolRef sym) {
+ return nonloc::SymbolVal(sym);
+}
+
+NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const APSInt& v, QualType T) {
+ // The Environment ensures we always get a persistent APSInt in
+ // BasicValueFactory, so we don't need to get the APSInt from
+ // BasicValueFactory again.
+ assert(!Loc::IsLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymIntExpr(lhs, op, v, T));
+}
+
+NonLoc ValueManager::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType T) {
+ assert(SymMgr.getType(lhs) == SymMgr.getType(rhs));
+ assert(!Loc::IsLocType(T));
+ return nonloc::SymExprVal(SymMgr.getSymSymExpr(lhs, op, rhs, T));
+}
+
+NonLoc NonLoc::MakeIntVal(BasicValueFactory& BasicVals, uint64_t X,
+ bool isUnsigned) {
+ return nonloc::ConcreteInt(BasicVals.getIntValue(X, isUnsigned));
+}
+
+NonLoc NonLoc::MakeVal(BasicValueFactory& BasicVals, uint64_t X,
+ unsigned BitWidth, bool isUnsigned) {
+ return nonloc::ConcreteInt(BasicVals.getValue(X, BitWidth, isUnsigned));
+}
+
+NonLoc NonLoc::MakeVal(BasicValueFactory& BasicVals, uint64_t X, QualType T) {
+ return nonloc::ConcreteInt(BasicVals.getValue(X, T));
+}
+
+NonLoc NonLoc::MakeVal(BasicValueFactory& BasicVals, IntegerLiteral* I) {
+
+ return nonloc::ConcreteInt(BasicVals.getValue(APSInt(I->getValue(),
+ I->getType()->isUnsignedIntegerType())));
+}
+
+NonLoc NonLoc::MakeVal(BasicValueFactory& BasicVals, const llvm::APInt& I,
+ bool isUnsigned) {
+ return nonloc::ConcreteInt(BasicVals.getValue(I, isUnsigned));
+}
+
+NonLoc NonLoc::MakeVal(BasicValueFactory& BasicVals, const llvm::APSInt& I) {
+ return nonloc::ConcreteInt(BasicVals.getValue(I));
+}
+
+NonLoc NonLoc::MakeIntTruthVal(BasicValueFactory& BasicVals, bool b) {
+ return nonloc::ConcreteInt(BasicVals.getTruthValue(b));
+}
+
+NonLoc ValueManager::makeTruthVal(bool b, QualType T) {
+ return nonloc::ConcreteInt(BasicVals.getTruthValue(b, T));
+}
+
+NonLoc NonLoc::MakeCompoundVal(QualType T, llvm::ImmutableList<SVal> Vals,
+ BasicValueFactory& BasicVals) {
+ return nonloc::CompoundVal(BasicVals.getCompoundValData(T, Vals));
+}
+
+SVal ValueManager::getRegionValueSymbolVal(const MemRegion* R) {
+ SymbolRef sym = SymMgr.getRegionValueSymbol(R);
+
+ if (const TypedRegion* TR = dyn_cast<TypedRegion>(R)) {
+ QualType T = TR->getValueType(SymMgr.getContext());
+
+ // If T is of function pointer type, create a CodeTextRegion wrapping a
+ // symbol.
+ if (T->isFunctionPointerType()) {
+ return Loc::MakeVal(MemMgr.getCodeTextRegion(sym, T));
+ }
+
+ if (Loc::IsLocType(T))
+ return Loc::MakeVal(MemMgr.getSymbolicRegion(sym));
+
+ // Only handle integers for now.
+ if (T->isIntegerType() && T->isScalarType())
+ return makeNonLoc(sym);
+ }
+
+ return UnknownVal();
+}
+
+SVal ValueManager::getConjuredSymbolVal(const Expr* E, unsigned Count) {
+ QualType T = E->getType();
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, Count);
+
+ // If T is of function pointer type, create a CodeTextRegion wrapping a
+ // symbol.
+ if (T->isFunctionPointerType()) {
+ return Loc::MakeVal(MemMgr.getCodeTextRegion(sym, T));
+ }
+
+ if (Loc::IsLocType(T))
+ return Loc::MakeVal(MemMgr.getSymbolicRegion(sym));
+
+ if (T->isIntegerType() && T->isScalarType())
+ return makeNonLoc(sym);
+
+ return UnknownVal();
+}
+
+SVal ValueManager::getConjuredSymbolVal(const Expr* E, QualType T,
+ unsigned Count) {
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(E, T, Count);
+
+ // If T is of function pointer type, create a CodeTextRegion wrapping a
+ // symbol.
+ if (T->isFunctionPointerType()) {
+ return Loc::MakeVal(MemMgr.getCodeTextRegion(sym, T));
+ }
+
+ if (Loc::IsLocType(T))
+ return Loc::MakeVal(MemMgr.getSymbolicRegion(sym));
+
+ if (T->isIntegerType() && T->isScalarType())
+ return makeNonLoc(sym);
+
+ return UnknownVal();
+}
+
+SVal ValueManager::getFunctionPointer(const FunctionDecl* FD) {
+ CodeTextRegion* R
+ = MemMgr.getCodeTextRegion(FD, Context.getPointerType(FD->getType()));
+ return loc::MemRegionVal(R);
+}
+
+nonloc::LocAsInteger nonloc::LocAsInteger::Make(BasicValueFactory& Vals, Loc V,
+ unsigned Bits) {
+ return LocAsInteger(Vals.getPersistentSValWithData(V, Bits));
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods for constructing Locs.
+//===----------------------------------------------------------------------===//
+
+Loc Loc::MakeVal(const MemRegion* R) { return loc::MemRegionVal(R); }
+
+Loc Loc::MakeVal(AddrLabelExpr* E) { return loc::GotoLabel(E->getLabel()); }
+
+Loc Loc::MakeNull(BasicValueFactory &BasicVals) {
+ return loc::ConcreteInt(BasicVals.getZeroWithPtrWidth());
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::printStdErr() const { print(llvm::errs()); }
+
+void SVal::print(std::ostream& Out) const {
+ llvm::raw_os_ostream out(Out);
+ print(out);
+}
+
+void SVal::print(llvm::raw_ostream& Out) const {
+
+ switch (getBaseKind()) {
+
+    case UnknownKind:
+      Out << "Unknown"; break;
+
+ case NonLocKind:
+ cast<NonLoc>(this)->print(Out); break;
+
+ case LocKind:
+ cast<Loc>(this)->print(Out); break;
+
+ case UndefinedKind:
+ Out << "Undefined"; break;
+
+ default:
+ assert (false && "Invalid SVal.");
+ }
+}
+
+void NonLoc::print(llvm::raw_ostream& Out) const {
+
+ switch (getSubKind()) {
+
+ case nonloc::ConcreteIntKind:
+ Out << cast<nonloc::ConcreteInt>(this)->getValue().getZExtValue();
+
+ if (cast<nonloc::ConcreteInt>(this)->getValue().isUnsigned())
+ Out << 'U';
+
+ break;
+
+ case nonloc::SymbolValKind:
+ Out << '$' << cast<nonloc::SymbolVal>(this)->getSymbol();
+ break;
+
+ case nonloc::SymExprValKind: {
+ const nonloc::SymExprVal& C = *cast<nonloc::SymExprVal>(this);
+ const SymExpr *SE = C.getSymbolicExpression();
+ Out << SE;
+ break;
+ }
+
+ case nonloc::LocAsIntegerKind: {
+ const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
+ C.getLoc().print(Out);
+ Out << " [as " << C.getNumBits() << " bit integer]";
+ break;
+ }
+
+ case nonloc::CompoundValKind: {
+ const nonloc::CompoundVal& C = *cast<nonloc::CompoundVal>(this);
+ Out << " {";
+ bool first = true;
+ for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+ if (first) { Out << ' '; first = false; }
+ else Out << ", ";
+ (*I).print(Out);
+ }
+ Out << " }";
+ break;
+ }
+
+ default:
+ assert (false && "Pretty-printed not implemented for this NonLoc.");
+ break;
+ }
+}
+
+void Loc::print(llvm::raw_ostream& Out) const {
+
+ switch (getSubKind()) {
+
+ case loc::ConcreteIntKind:
+ Out << cast<loc::ConcreteInt>(this)->getValue().getZExtValue()
+ << " (Loc)";
+ break;
+
+ case loc::GotoLabelKind:
+ Out << "&&"
+ << cast<loc::GotoLabel>(this)->getLabel()->getID()->getName();
+ break;
+
+ case loc::MemRegionKind:
+ Out << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
+ break;
+
+ default:
+ assert (false && "Pretty-printing not implemented for this Loc.");
+ break;
+ }
+}
diff --git a/lib/Analysis/SimpleConstraintManager.cpp b/lib/Analysis/SimpleConstraintManager.cpp
new file mode 100644
index 0000000..f79dba0
--- /dev/null
+++ b/lib/Analysis/SimpleConstraintManager.cpp
@@ -0,0 +1,263 @@
+//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleConstraintManager, a class that holds code shared
+// between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+
+namespace clang {
+
+SimpleConstraintManager::~SimpleConstraintManager() {}
+
+bool SimpleConstraintManager::canReasonAbout(SVal X) const {
+ if (nonloc::SymExprVal *SymVal = dyn_cast<nonloc::SymExprVal>(&X)) {
+ const SymExpr *SE = SymVal->getSymbolicExpression();
+
+ if (isa<SymbolData>(SE))
+ return true;
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ switch (SIE->getOpcode()) {
+ // We don't reason yet about bitwise-constraints on symbolic values.
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ return false;
+ // We don't reason yet about arithmetic constraints on symbolic values.
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ return false;
+ // All other cases.
+ default:
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ return true;
+}
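+// For example, canReasonAbout() accepts a plain symbol or a SymIntExpr that
+// compares a symbol against a constant (e.g. '$x > 5'), but rejects bitwise
+// and arithmetic SymIntExprs (e.g. '$x & 0xFF', '$x + 1') as well as
+// SymSymExprs; Assume() then simply treats the rejected cases as feasible.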
+
+const GRState*
+SimpleConstraintManager::Assume(const GRState* St, SVal Cond, bool Assumption,
+ bool& isFeasible) {
+ if (Cond.isUnknown()) {
+ isFeasible = true;
+ return St;
+ }
+
+ if (isa<NonLoc>(Cond))
+ return Assume(St, cast<NonLoc>(Cond), Assumption, isFeasible);
+ else
+ return Assume(St, cast<Loc>(Cond), Assumption, isFeasible);
+}
+
+const GRState*
+SimpleConstraintManager::Assume(const GRState* St, Loc Cond, bool Assumption,
+ bool& isFeasible) {
+ St = AssumeAux(St, Cond, Assumption, isFeasible);
+
+ if (!isFeasible)
+ return St;
+
+ // EvalAssume is used to call into the GRTransferFunction object to perform
+ // any checker-specific update of the state based on this assumption being
+ // true or false.
+ return StateMgr.getTransferFuncs().EvalAssume(StateMgr, St, Cond, Assumption,
+ isFeasible);
+}
+
+const GRState*
+SimpleConstraintManager::AssumeAux(const GRState* St, Loc Cond, bool Assumption,
+ bool& isFeasible) {
+ BasicValueFactory& BasicVals = StateMgr.getBasicVals();
+
+ switch (Cond.getSubKind()) {
+ default:
+ assert (false && "'Assume' not implemented for this Loc.");
+ return St;
+
+ case loc::MemRegionKind: {
+ // FIXME: Should this go into the storemanager?
+
+ const MemRegion* R = cast<loc::MemRegionVal>(Cond).getRegion();
+ const SubRegion* SubR = dyn_cast<SubRegion>(R);
+
+ while (SubR) {
+        // FIXME: for now we only find the first symbolic region.
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(SubR)) {
+ if (Assumption)
+ return AssumeSymNE(St, SymR->getSymbol(),
+ BasicVals.getZeroWithPtrWidth(), isFeasible);
+ else
+ return AssumeSymEQ(St, SymR->getSymbol(),
+ BasicVals.getZeroWithPtrWidth(), isFeasible);
+ }
+ SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+ }
+
+ // FALL-THROUGH.
+ }
+
+ case loc::GotoLabelKind:
+ isFeasible = Assumption;
+ return St;
+
+ case loc::ConcreteIntKind: {
+ bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
+ isFeasible = b ? Assumption : !Assumption;
+ return St;
+ }
+ } // end switch
+}
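+// For example, assuming 'if (p)' is taken for a symbolic pointer 'p',
+// AssumeAux above walks up to the underlying SymbolicRegion and constrains
+// its symbol to be non-null (AssumeSymNE against zero); the false branch
+// constrains it to equal null. A concrete address is feasible exactly when
+// its (non-)nullness matches the assumption.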
+
+const GRState*
+SimpleConstraintManager::Assume(const GRState* St, NonLoc Cond, bool Assumption,
+ bool& isFeasible) {
+ St = AssumeAux(St, Cond, Assumption, isFeasible);
+
+ if (!isFeasible)
+ return St;
+
+ // EvalAssume is used to call into the GRTransferFunction object to perform
+ // any checker-specific update of the state based on this assumption being
+ // true or false.
+ return StateMgr.getTransferFuncs().EvalAssume(StateMgr, St, Cond, Assumption,
+ isFeasible);
+}
+
+const GRState*
+SimpleConstraintManager::AssumeAux(const GRState* St, NonLoc Cond,
+ bool Assumption, bool& isFeasible) {
+  // Bail out if this is a condition we cannot reason about (e.g., a bitwise
+  // or arithmetic SymIntExpr, or a SymSymExpr); treat it as feasible.
+ if (!canReasonAbout(Cond)) {
+ isFeasible = true;
+ return St;
+ }
+
+ BasicValueFactory& BasicVals = StateMgr.getBasicVals();
+ SymbolManager& SymMgr = StateMgr.getSymbolManager();
+
+ switch (Cond.getSubKind()) {
+ default:
+ assert(false && "'Assume' not implemented for this NonLoc");
+
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
+ SymbolRef sym = SV.getSymbol();
+ QualType T = SymMgr.getType(sym);
+
+ if (Assumption)
+ return AssumeSymNE(St, sym, BasicVals.getValue(0, T), isFeasible);
+ else
+ return AssumeSymEQ(St, sym, BasicVals.getValue(0, T), isFeasible);
+ }
+
+ case nonloc::SymExprValKind: {
+ nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
+ if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression()))
+ return AssumeSymInt(St, Assumption, SE, isFeasible);
+
+ isFeasible = true;
+ return St;
+ }
+
+ case nonloc::ConcreteIntKind: {
+ bool b = cast<nonloc::ConcreteInt>(Cond).getValue() != 0;
+ isFeasible = b ? Assumption : !Assumption;
+ return St;
+ }
+
+ case nonloc::LocAsIntegerKind:
+ return AssumeAux(St, cast<nonloc::LocAsInteger>(Cond).getLoc(),
+ Assumption, isFeasible);
+ } // end switch
+}
+
+const GRState*
+SimpleConstraintManager::AssumeSymInt(const GRState* St, bool Assumption,
+ const SymIntExpr *SE, bool& isFeasible) {
+
+ // Here we assume that LHS is a symbol. This is consistent with the
+ // rest of the constraint manager logic.
+ SymbolRef Sym = cast<SymbolData>(SE->getLHS());
+ const llvm::APSInt &Int = SE->getRHS();
+
+ switch (SE->getOpcode()) {
+ default:
+ // No logic yet for other operators.
+ isFeasible = true;
+ return St;
+
+ case BinaryOperator::EQ:
+ return Assumption ? AssumeSymEQ(St, Sym, Int, isFeasible)
+ : AssumeSymNE(St, Sym, Int, isFeasible);
+
+ case BinaryOperator::NE:
+ return Assumption ? AssumeSymNE(St, Sym, Int, isFeasible)
+ : AssumeSymEQ(St, Sym, Int, isFeasible);
+
+ case BinaryOperator::GT:
+ return Assumption ? AssumeSymGT(St, Sym, Int, isFeasible)
+ : AssumeSymLE(St, Sym, Int, isFeasible);
+
+ case BinaryOperator::GE:
+ return Assumption ? AssumeSymGE(St, Sym, Int, isFeasible)
+ : AssumeSymLT(St, Sym, Int, isFeasible);
+
+ case BinaryOperator::LT:
+ return Assumption ? AssumeSymLT(St, Sym, Int, isFeasible)
+ : AssumeSymGE(St, Sym, Int, isFeasible);
+
+ case BinaryOperator::LE:
+ return Assumption ? AssumeSymLE(St, Sym, Int, isFeasible)
+ : AssumeSymGT(St, Sym, Int, isFeasible);
+ } // end switch
+}
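+// AssumeSymInt maps each comparison to its negation when the assumption is
+// false: for example, assuming that the branch condition '$x > 5' does not
+// hold dispatches to AssumeSymLE and constrains '$x <= 5'.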
+
+const GRState*
+SimpleConstraintManager::AssumeInBound(const GRState* St, SVal Idx,
+ SVal UpperBound, bool Assumption,
+ bool& isFeasible) {
+ // Only support ConcreteInt for now.
+ if (!(isa<nonloc::ConcreteInt>(Idx) && isa<nonloc::ConcreteInt>(UpperBound))){
+ isFeasible = true;
+ return St;
+ }
+
+ const llvm::APSInt& Zero = getBasicVals().getZeroWithPtrWidth(false);
+ llvm::APSInt IdxV = cast<nonloc::ConcreteInt>(Idx).getValue();
+ // IdxV might be too narrow.
+ if (IdxV.getBitWidth() < Zero.getBitWidth())
+ IdxV.extend(Zero.getBitWidth());
+ // UBV might be too narrow, too.
+ llvm::APSInt UBV = cast<nonloc::ConcreteInt>(UpperBound).getValue();
+ if (UBV.getBitWidth() < Zero.getBitWidth())
+ UBV.extend(Zero.getBitWidth());
+
+ bool InBound = (Zero <= IdxV) && (IdxV < UBV);
+
+ isFeasible = Assumption ? InBound : !InBound;
+
+ return St;
+}
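+// For example, with Idx == 3 and UpperBound == 10, AssumeInBound evaluates
+// 0 <= 3 && 3 < 10, so the index is in bounds and the state is feasible
+// exactly when the assumption says it is. Values narrower than Zero are
+// first extended to pointer width so the comparisons use a common bit width.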
+
+} // end of namespace clang
diff --git a/lib/Analysis/SimpleConstraintManager.h b/lib/Analysis/SimpleConstraintManager.h
new file mode 100644
index 0000000..fb41e2f
--- /dev/null
+++ b/lib/Analysis/SimpleConstraintManager.h
@@ -0,0 +1,84 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_ANALYSIS_SIMPLE_CONSTRAINT_MANAGER_H
+
+#include "clang/Analysis/PathSensitive/ConstraintManager.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+
+namespace clang {
+
+class SimpleConstraintManager : public ConstraintManager {
+protected:
+ GRStateManager& StateMgr;
+public:
+ SimpleConstraintManager(GRStateManager& statemgr)
+ : StateMgr(statemgr) {}
+ virtual ~SimpleConstraintManager();
+
+ bool canReasonAbout(SVal X) const;
+
+ virtual const GRState* Assume(const GRState* St, SVal Cond, bool Assumption,
+ bool& isFeasible);
+
+ const GRState* Assume(const GRState* St, Loc Cond, bool Assumption,
+ bool& isFeasible);
+
+  const GRState* AssumeAux(const GRState* St, Loc Cond, bool Assumption,
+ bool& isFeasible);
+
+ const GRState* Assume(const GRState* St, NonLoc Cond, bool Assumption,
+ bool& isFeasible);
+
+ const GRState* AssumeAux(const GRState* St, NonLoc Cond, bool Assumption,
+ bool& isFeasible);
+
+ const GRState* AssumeSymInt(const GRState* St, bool Assumption,
+ const SymIntExpr *SE, bool& isFeasible);
+
+ virtual const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ virtual const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ virtual const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ virtual const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ virtual const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ virtual const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
+ const llvm::APSInt& V,
+ bool& isFeasible) = 0;
+
+ const GRState* AssumeInBound(const GRState* St, SVal Idx, SVal UpperBound,
+ bool Assumption, bool& isFeasible);
+
+private:
+ BasicValueFactory& getBasicVals() { return StateMgr.getBasicVals(); }
+ SymbolManager& getSymbolManager() const { return StateMgr.getSymbolManager(); }
+};
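+// Concrete constraint managers (BasicConstraintManager and
+// RangeConstraintManager) derive from SimpleConstraintManager and implement
+// the pure virtual AssumeSym* hooks above; the shared Assume/AssumeAux logic
+// reduces every supported condition to those primitives.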
+
+} // end clang namespace
+
+#endif
diff --git a/lib/Analysis/Store.cpp b/lib/Analysis/Store.cpp
new file mode 100644
index 0000000..13326ab
--- /dev/null
+++ b/lib/Analysis/Store.cpp
@@ -0,0 +1,110 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/Store.h"
+#include "clang/Analysis/PathSensitive/GRState.h"
+
+using namespace clang;
+
+StoreManager::StoreManager(GRStateManager &stateMgr)
+ : ValMgr(stateMgr.getValueManager()),
+ StateMgr(stateMgr),
+ MRMgr(ValMgr.getRegionManager()) {}
+
+StoreManager::CastResult
+StoreManager::CastRegion(const GRState* state, const MemRegion* R,
+ QualType CastToTy) {
+
+ ASTContext& Ctx = StateMgr.getContext();
+
+ // We need to know the real type of CastToTy.
+ QualType ToTy = Ctx.getCanonicalType(CastToTy);
+
+ // Return the same region if the region types are compatible.
+ if (const TypedRegion* TR = dyn_cast<TypedRegion>(R)) {
+ QualType Ta = Ctx.getCanonicalType(TR->getLocationType(Ctx));
+
+ if (Ta == ToTy)
+ return CastResult(state, R);
+ }
+
+ if (const PointerType* PTy = dyn_cast<PointerType>(ToTy.getTypePtr())) {
+ // Check if we are casting to 'void*'.
+ // FIXME: Handle arbitrary upcasts.
+ QualType Pointee = PTy->getPointeeType();
+ if (Pointee->isVoidType()) {
+
+ do {
+ if (const TypedViewRegion *TR = dyn_cast<TypedViewRegion>(R)) {
+          // Casts to void* remove the TypedViewRegion. This happens when:
+ //
+ // void foo(void*);
+ // ...
+ // void bar() {
+ // int x;
+ // foo(&x);
+ // }
+ //
+ R = TR->removeViews();
+ continue;
+ }
+ else if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+          // Casts to void* also remove ElementRegions. This happens when:
+ //
+ // void foo(void*);
+ // ...
+ // void bar() {
+ // int x;
+ // foo((char*)&x);
+ // }
+ //
+ R = ER->getSuperRegion();
+ continue;
+ }
+ else
+ break;
+ }
+ while (0);
+
+ return CastResult(state, R);
+ }
+ else if (Pointee->isIntegerType()) {
+ // FIXME: At some point, it stands to reason that this 'dyn_cast' should
+ // become a 'cast' and that 'R' will always be a TypedRegion.
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(R)) {
+        // Check if we are casting to a region with an integer type. We know
+        // the types aren't the same, so we construct an ElementRegion.
+ SVal Idx = ValMgr.makeZeroArrayIndex();
+
+ // If the super region is an element region, strip it away.
+ // FIXME: Is this the right thing to do in all cases?
+ const TypedRegion *Base = isa<ElementRegion>(TR) ?
+ cast<TypedRegion>(TR->getSuperRegion()) : TR;
+ ElementRegion* ER = MRMgr.getElementRegion(Pointee, Idx, Base);
+ return CastResult(state, ER);
+ }
+ }
+ }
+
+ // FIXME: Need to handle arbitrary downcasts.
+  // FIXME: Handle the case where a TypedViewRegion (layering a SymbolicRegion
+  //  or an AllocaRegion) is cast to another view, thus causing the memory to
+  //  be reused for a different purpose.
+
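+  // For example, when an opaque symbolic pointer (a SymbolicRegion) is cast
+  // to 'int*', we layer a TypedViewRegion carrying the cast-to type on top of
+  // the original region rather than changing the region itself.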
+ if (isa<SymbolicRegion>(R) || isa<AllocaRegion>(R)) {
+ const MemRegion* ViewR = MRMgr.getTypedViewRegion(CastToTy, R);
+ return CastResult(AddRegionView(state, ViewR, R), ViewR);
+ }
+
+ return CastResult(state, R);
+}
diff --git a/lib/Analysis/SymbolManager.cpp b/lib/Analysis/SymbolManager.cpp
new file mode 100644
index 0000000..5c885cd
--- /dev/null
+++ b/lib/Analysis/SymbolManager.cpp
@@ -0,0 +1,203 @@
+//== SymbolManager.cpp - Management of Symbolic Values ----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by GRExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/PathSensitive/MemRegion.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+static void print(llvm::raw_ostream& os, const SymExpr *SE);
+
+static void print(llvm::raw_ostream& os, BinaryOperator::Opcode Op) {
+ switch (Op) {
+ default:
+ assert(false && "operator printing not implemented");
+ break;
+ case BinaryOperator::Mul: os << '*' ; break;
+ case BinaryOperator::Div: os << '/' ; break;
+ case BinaryOperator::Rem: os << '%' ; break;
+ case BinaryOperator::Add: os << '+' ; break;
+ case BinaryOperator::Sub: os << '-' ; break;
+ case BinaryOperator::Shl: os << "<<" ; break;
+ case BinaryOperator::Shr: os << ">>" ; break;
+ case BinaryOperator::LT: os << "<" ; break;
+ case BinaryOperator::GT: os << '>' ; break;
+ case BinaryOperator::LE: os << "<=" ; break;
+ case BinaryOperator::GE: os << ">=" ; break;
+ case BinaryOperator::EQ: os << "==" ; break;
+ case BinaryOperator::NE: os << "!=" ; break;
+ case BinaryOperator::And: os << '&' ; break;
+ case BinaryOperator::Xor: os << '^' ; break;
+ case BinaryOperator::Or: os << '|' ; break;
+ }
+}
+
+static void print(llvm::raw_ostream& os, const SymIntExpr *SE) {
+ os << '(';
+ print(os, SE->getLHS());
+ os << ") ";
+ print(os, SE->getOpcode());
+ os << ' ' << SE->getRHS().getZExtValue();
+ if (SE->getRHS().isUnsigned()) os << 'U';
+}
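+// The printer above renders a SymIntExpr comparing symbol $1 against the
+// unsigned constant 10 as "($1) > 10U".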
+
+static void print(llvm::raw_ostream& os, const SymSymExpr *SE) {
+ os << '(';
+ print(os, SE->getLHS());
+ os << ") ";
+ os << '(';
+ print(os, SE->getRHS());
+ os << ')';
+}
+
+static void print(llvm::raw_ostream& os, const SymExpr *SE) {
+ switch (SE->getKind()) {
+ case SymExpr::BEGIN_SYMBOLS:
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::END_SYMBOLS:
+ os << '$' << cast<SymbolData>(SE)->getSymbolID();
+ return;
+ case SymExpr::SymIntKind:
+ print(os, cast<SymIntExpr>(SE));
+ return;
+ case SymExpr::SymSymKind:
+ print(os, cast<SymSymExpr>(SE));
+ return;
+ }
+}
+
+
+llvm::raw_ostream& llvm::operator<<(llvm::raw_ostream& os, const SymExpr *SE) {
+ print(os, SE);
+ return os;
+}
+
+std::ostream& std::operator<<(std::ostream& os, const SymExpr *SE) {
+ llvm::raw_os_ostream O(os);
+ print(O, SE);
+ return os;
+}
+
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const MemRegion* R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolRegionValue::Profile(profile, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+ new (SD) SymbolRegionValue(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured*
+SymbolManager::getConjuredSymbol(const Stmt* E, QualType T, unsigned Count,
+ const void* SymbolTag) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolConjured::Profile(profile, E, T, Count, SymbolTag);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+ new (SD) SymbolConjured(SymbolCounter, E, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolConjured>(SD);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& v,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymIntExpr::Profile(ID, lhs, op, v, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+ new (data) SymIntExpr(lhs, op, v, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymIntExpr>(data);
+}
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+ new (data) SymSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymSymExpr>(data);
+}
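+// Both factory methods above unique their results through the FoldingSet:
+// requesting the same (lhs, opcode, rhs, type) combination twice yields the
+// same SymExpr pointer, and the node is allocated from the BumpPtrAllocator
+// only on the first request.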
+
+QualType SymbolConjured::getType(ASTContext&) const {
+ return T;
+}
+
+QualType SymbolRegionValue::getType(ASTContext& C) const {
+ if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
+ return TR->getValueType(C);
+
+ return QualType();
+}
+
+SymbolManager::~SymbolManager() {}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+ return Loc::IsLocType(T) || T->isIntegerType();
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+ TheLiving = F.Add(TheLiving, sym);
+ TheDead = F.Remove(TheDead, sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+ if (isLive(sym))
+ return false;
+
+ TheDead = F.Add(TheDead, sym);
+ return true;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+ if (TheLiving.contains(sym))
+ return true;
+
+  // Interrogate the symbol. It may derive from an input value to
+ // the analyzed function/method.
+ return isa<SymbolRegionValue>(sym);
+}
+
+SymbolVisitor::~SymbolVisitor() {}
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
new file mode 100644
index 0000000..014ea82
--- /dev/null
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -0,0 +1,312 @@
+//==- UninitializedValues.cpp - Find Uninitialized Values --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Uninitialized Values analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/AnalysisDiagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowSolver.h"
+#include "llvm/Support/Compiler.h"
+
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Dataflow initialization logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN RegisterDecls
+ : public CFGRecStmtDeclVisitor<RegisterDecls> {
+
+ UninitializedValues::AnalysisDataTy& AD;
+public:
+ RegisterDecls(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
+
+ void VisitVarDecl(VarDecl* VD) { AD.Register(VD); }
+ CFG& getCFG() { return AD.getCFG(); }
+};
+
+} // end anonymous namespace
+
+void UninitializedValues::InitializeValues(const CFG& cfg) {
+ RegisterDecls R(getAnalysisData());
+ cfg.VisitBlockStmts(R);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN TransferFuncs
+ : public CFGStmtVisitor<TransferFuncs,bool> {
+
+ UninitializedValues::ValTy V;
+ UninitializedValues::AnalysisDataTy& AD;
+public:
+ TransferFuncs(UninitializedValues::AnalysisDataTy& ad) : AD(ad) {}
+
+ UninitializedValues::ValTy& getVal() { return V; }
+ CFG& getCFG() { return AD.getCFG(); }
+
+ void SetTopValue(UninitializedValues::ValTy& X) {
+ X.setDeclValues(AD);
+ X.resetBlkExprValues(AD);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr* DR);
+ bool VisitBinaryOperator(BinaryOperator* B);
+ bool VisitUnaryOperator(UnaryOperator* U);
+ bool VisitStmt(Stmt* S);
+ bool VisitCallExpr(CallExpr* C);
+ bool VisitDeclStmt(DeclStmt* D);
+ bool VisitConditionalOperator(ConditionalOperator* C);
+ bool BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S);
+
+ bool Visit(Stmt *S);
+ bool BlockStmt_VisitExpr(Expr* E);
+
+ void VisitTerminator(CFGBlock* B) { }
+};
+
+static const bool Initialized = false;
+static const bool Uninitialized = true;
+
+bool TransferFuncs::VisitDeclRefExpr(DeclRefExpr* DR) {
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VD->isBlockVarDecl()) {
+
+ if (AD.Observer)
+ AD.Observer->ObserveDeclRefExpr(V, AD, DR, VD);
+
+      // Pseudo-hack to prevent a cascade of warnings. If an accessed variable
+      // is uninitialized, then we are already going to flag a warning for
+      // this variable, which is a "source" of uninitialized values.
+      // We can otherwise do a full "taint" of uninitialized values. The
+      // client chooses between these options by toggling AD.FullUninitTaint.
+
+ if (AD.FullUninitTaint)
+ return V(VD,AD);
+ }
+
+ return Initialized;
+}
+
+static VarDecl* FindBlockVarDecl(Expr* E) {
+
+ // Blast through casts and parentheses to find any DeclRefExprs that
+ // refer to a block VarDecl.
+
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (VarDecl* VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VD->isBlockVarDecl()) return VD;
+
+ return NULL;
+}
+
+bool TransferFuncs::VisitBinaryOperator(BinaryOperator* B) {
+
+ if (VarDecl* VD = FindBlockVarDecl(B->getLHS()))
+ if (B->isAssignmentOp()) {
+ if (B->getOpcode() == BinaryOperator::Assign)
+ return V(VD,AD) = Visit(B->getRHS());
+ else // Handle +=, -=, *=, etc. We do want '&', not '&&'.
+ return V(VD,AD) = Visit(B->getLHS()) & Visit(B->getRHS());
+ }
+
+ return VisitStmt(B);
+}
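+// For example, given 'int y; x = y;', a plain assignment gives 'x' the status
+// of its right-hand side: with AD.FullUninitTaint enabled the uninitialized
+// status of 'y' propagates into 'x'; otherwise only the direct use of 'y' is
+// flagged and 'x' is treated as initialized afterwards.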
+
+bool TransferFuncs::VisitDeclStmt(DeclStmt* S) {
+ for (DeclStmt::decl_iterator I=S->decl_begin(), E=S->decl_end(); I!=E; ++I) {
+ VarDecl *VD = dyn_cast<VarDecl>(*I);
+ if (VD && VD->isBlockVarDecl()) {
+ if (Stmt* I = VD->getInit())
+ V(VD,AD) = AD.FullUninitTaint ? V(cast<Expr>(I),AD) : Initialized;
+ else {
+ // Special case for declarations of array types. For things like:
+ //
+ // char x[10];
+ //
+ // we should treat "x" as being initialized, because the variable
+ // "x" really refers to the memory block. Clearly x[1] is
+ // uninitialized, but expressions like "(char *) x" really do refer to
+ // an initialized value. This simple dataflow analysis does not reason
+ // about the contents of arrays, although it could be potentially
+ // extended to do so if the array were of constant size.
+ if (VD->getType()->isArrayType())
+ V(VD,AD) = Initialized;
+ else
+ V(VD,AD) = Uninitialized;
+ }
+ }
+ }
+ return Uninitialized; // Value is never consumed.
+}
+
+bool TransferFuncs::VisitCallExpr(CallExpr* C) {
+ VisitChildren(C);
+ return Initialized;
+}
+
+bool TransferFuncs::VisitUnaryOperator(UnaryOperator* U) {
+ switch (U->getOpcode()) {
+ case UnaryOperator::AddrOf: {
+ VarDecl* VD = FindBlockVarDecl(U->getSubExpr());
+ if (VD && VD->isBlockVarDecl())
+ return V(VD,AD) = Initialized;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return Visit(U->getSubExpr());
+}
+
+bool
+TransferFuncs::BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt* S) {
+  // This represents a use of the 'collection'.
+ bool x = Visit(S->getCollection());
+
+ if (x == Uninitialized)
+ return Uninitialized;
+
+ // This represents an initialization of the 'element' value.
+ Stmt* Element = S->getElement();
+ VarDecl* VD = 0;
+
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(Element))
+ VD = cast<VarDecl>(DS->getSingleDecl());
+ else {
+ Expr* ElemExpr = cast<Expr>(Element)->IgnoreParens();
+
+ // Initialize the value of the reference variable.
+ if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(ElemExpr))
+ VD = cast<VarDecl>(DR->getDecl());
+ else
+ return Visit(ElemExpr);
+ }
+
+ V(VD,AD) = Initialized;
+ return Initialized;
+}
+
+
+bool TransferFuncs::VisitConditionalOperator(ConditionalOperator* C) {
+ Visit(C->getCond());
+
+ bool rhsResult = Visit(C->getRHS());
+ // Handle the GNU extension for missing LHS.
+ if (Expr *lhs = C->getLHS())
+ return Visit(lhs) & rhsResult; // Yes: we want &, not &&.
+ else
+ return rhsResult;
+}
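+// In the GNU extension handled above ('cond ?: rhs') the LHS expression is
+// omitted and getLHS() returns null, so the result takes its status from the
+// RHS alone; otherwise both branches are combined with '&'.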
+
+bool TransferFuncs::VisitStmt(Stmt* S) {
+ bool x = Initialized;
+
+ // We don't stop at the first subexpression that is Uninitialized because
+  // evaluating some subexpressions may result in propagating "Uninitialized"
+ // or "Initialized" to variables referenced in the other subexpressions.
+ for (Stmt::child_iterator I=S->child_begin(), E=S->child_end(); I!=E; ++I)
+ if (*I && Visit(*I) == Uninitialized) x = Uninitialized;
+
+ return x;
+}
+
+bool TransferFuncs::Visit(Stmt *S) {
+ if (AD.isTracked(static_cast<Expr*>(S))) return V(static_cast<Expr*>(S),AD);
+ else return static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(S);
+}
+
+bool TransferFuncs::BlockStmt_VisitExpr(Expr* E) {
+ bool x = static_cast<CFGStmtVisitor<TransferFuncs,bool>*>(this)->Visit(E);
+ if (AD.isTracked(E)) V(E,AD) = x;
+ return x;
+}
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Merge operator.
+//
+// In our transfer functions we take the approach that any expression
+// involving an uninitialized value is itself uninitialized, e.g.,
+// Uninitialized + ___ = Uninitialized.
+//
+// Merges take the same approach, preferring soundness. At a confluence point,
+// if any predecessor has a variable marked uninitialized, the value is
+// uninitialized at the confluence point.
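+//
+// For example:
+//
+//   int x;
+//   if (flag) x = 1;
+//   use(x);   // 'x' is uninitialized along the path where 'flag' is false,
+//             // so the union merge reports it uninitialized here.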
+//===----------------------------------------------------------------------===//
+
+namespace {
+ typedef StmtDeclBitVector_Types::Union Merge;
+ typedef DataflowSolver<UninitializedValues,TransferFuncs,Merge> Solver;
+}
+
+//===----------------------------------------------------------------------===//
+// Uninitialized values checker. Scan an AST and flag uses of uninitialized
+// variables.
+//===----------------------------------------------------------------------===//
+
+UninitializedValues_ValueTypes::ObserverTy::~ObserverTy() {}
+
+namespace {
+class VISIBILITY_HIDDEN UninitializedValuesChecker
+ : public UninitializedValues::ObserverTy {
+
+ ASTContext &Ctx;
+ Diagnostic &Diags;
+ llvm::SmallPtrSet<VarDecl*,10> AlreadyWarned;
+
+public:
+ UninitializedValuesChecker(ASTContext &ctx, Diagnostic &diags)
+ : Ctx(ctx), Diags(diags) {}
+
+ virtual void ObserveDeclRefExpr(UninitializedValues::ValTy& V,
+ UninitializedValues::AnalysisDataTy& AD,
+ DeclRefExpr* DR, VarDecl* VD) {
+
+ assert ( AD.isTracked(VD) && "Unknown VarDecl.");
+
+ if (V(VD,AD) == Uninitialized)
+ if (AlreadyWarned.insert(VD))
+ Diags.Report(Ctx.getFullLoc(DR->getSourceRange().getBegin()),
+ diag::warn_uninit_val);
+ }
+};
+} // end anonymous namespace
+
+namespace clang {
+void CheckUninitializedValues(CFG& cfg, ASTContext &Ctx, Diagnostic &Diags,
+ bool FullUninitTaint) {
+
+ // Compute the uninitialized values information.
+ UninitializedValues U(cfg);
+ U.getAnalysisData().FullUninitTaint = FullUninitTaint;
+ Solver S(U);
+ S.runOnCFG(cfg);
+
+ // Scan for DeclRefExprs that use uninitialized values.
+ UninitializedValuesChecker Observer(Ctx,Diags);
+ U.getAnalysisData().Observer = &Observer;
+ S.runOnAllBlocks(cfg);
+}
+} // end namespace clang
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
new file mode 100644
index 0000000..1cbf11c
--- /dev/null
+++ b/lib/Basic/CMakeLists.txt
@@ -0,0 +1,24 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangBasic
+ ConvertUTF.c
+ Diagnostic.cpp
+ FileManager.cpp
+ IdentifierTable.cpp
+ SourceLocation.cpp
+ SourceManager.cpp
+ TargetInfo.cpp
+ Targets.cpp
+ TokenKinds.cpp
+ )
+
+add_dependencies(clangBasic
+ ClangDiagnosticAnalysis
+ ClangDiagnosticAST
+ ClangDiagnosticCommon
+ ClangDiagnosticDriver
+ ClangDiagnosticFrontend
+ ClangDiagnosticGroups
+ ClangDiagnosticLex
+ ClangDiagnosticParse
+ ClangDiagnosticSema)
diff --git a/lib/Basic/ConvertUTF.c b/lib/Basic/ConvertUTF.c
new file mode 100644
index 0000000..e5dd3e6
--- /dev/null
+++ b/lib/Basic/ConvertUTF.c
@@ -0,0 +1,547 @@
+/*===--- ConvertUTF.c - Universal Character Names conversions ---------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===------------------------------------------------------------------------=*/
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ *
+ * Disclaimer
+ *
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ *
+ * Limitations on Rights to Redistribute This Code
+ *
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+ Conversions between UTF32, UTF-16, and UTF-8. Source code file.
+ Author: Mark E. Davis, 1994.
+ Rev History: Rick McGowan, fixes & updates May 2001.
+ Sept 2001: fixed const & error conditions per
+ mods suggested by S. Parent & A. Lillich.
+ June 2002: Tim Dodd added detection and handling of incomplete
+ source sequences, enhanced error detection, added casts
+ to eliminate compiler warnings.
+ July 2003: slight mods to back out aggressive FFFE detection.
+ Jan 2004: updated switches in from-UTF8 conversions.
+ Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions.
+
+ See the header file "ConvertUTF.h" for complete documentation.
+
+------------------------------------------------------------------------ */
+
+
+#include "clang/Basic/ConvertUTF.h"
+#ifdef CVTUTF_DEBUG
+#include <stdio.h>
+#endif
+
+static const int halfShift = 10; /* used for shifting by 10 bits */
+
+static const UTF32 halfBase = 0x0010000UL;
+static const UTF32 halfMask = 0x3FFUL;
+
+#define UNI_SUR_HIGH_START (UTF32)0xD800
+#define UNI_SUR_HIGH_END (UTF32)0xDBFF
+#define UNI_SUR_LOW_START (UTF32)0xDC00
+#define UNI_SUR_LOW_END (UTF32)0xDFFF
+#define false 0
+#define true 1
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Index into the table below with the first byte of a UTF-8 sequence to
+ * get the number of trailing bytes that are supposed to follow it.
+ * Note that *legal* UTF-8 sequences can't have 4 or 5 trailing bytes. The
+ * table is left as-is for anyone who may want to do such conversion, which
+ * was allowed in earlier algorithms.
+ */
+static const char trailingBytesForUTF8[256] = {
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
+};
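+/*
+ * For example, trailingBytesForUTF8[0x41] == 0 (an ASCII byte stands alone),
+ * trailingBytesForUTF8[0xC3] == 1 (lead byte of a 2-byte sequence such as
+ * U+00E9), and trailingBytesForUTF8[0xE2] == 2 (lead byte of a 3-byte
+ * sequence such as U+20AC).
+ */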
+
+/*
+ * Magic values subtracted from a buffer value during UTF8 conversion.
+ * This table contains as many values as there might be trailing bytes
+ * in a UTF-8 sequence.
+ */
+static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
+ 0x03C82080UL, 0xFA082080UL, 0x82082080UL };
+
+/*
+ * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+ * into the first byte, depending on how many bytes follow. There are
+ * as many entries in this table as there are UTF-8 sequence types.
+ * (I.e., one byte sequence, two byte... etc.). Remember that sequences
+ * for *legal* UTF-8 will be 4 or fewer bytes total.
+ */
+static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
+
+/* --------------------------------------------------------------------- */
+
+/* The interface converts a whole buffer to avoid function-call overhead.
+ * Constants have been gathered. Loops & conditionals have been removed as
+ * much as possible for efficiency, in favor of drop-through switches.
+ * (See "Note A" at the bottom of the file for equivalent code.)
+ * If your compiler supports it, the "isLegalUTF8" call can be turned
+ * into an inline function.
+ */
+
+#ifdef CLANG_NEEDS_THESE_ONE_DAY
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF16 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF32* source = *sourceStart;
+ UTF16* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ if (target >= targetEnd) {
+ result = targetExhausted; break;
+ }
+ ch = *source++;
+ if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+ /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = (UTF16)ch; /* normal case */
+ }
+ } else if (ch > UNI_MAX_LEGAL_UTF32) {
+ if (flags == strictConversion) {
+ result = sourceIllegal;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+      /* target is a character in range 0x10000 - 0x10FFFF. */
+ if (target + 1 >= targetEnd) {
+ --source; /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ ch -= halfBase;
+ *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+ *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF16toUTF32 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF16* source = *sourceStart;
+ UTF32* target = *targetStart;
+ UTF32 ch, ch2;
+ while (source < sourceEnd) {
+ const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
+ ch = *source++;
+ /* If we have a surrogate pair, convert to UTF32 first. */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+ /* If the 16 bits following the high surrogate are in the source buffer... */
+ if (source < sourceEnd) {
+ ch2 = *source;
+ /* If it's a low surrogate, convert to UTF32. */
+ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+ ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+ + (ch2 - UNI_SUR_LOW_START) + halfBase;
+ ++source;
+ } else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ } else { /* We don't have the 16 bits following the high surrogate. */
+ --source; /* return to the high surrogate */
+ result = sourceExhausted;
+ break;
+ }
+ } else if (flags == strictConversion) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ if (target >= targetEnd) {
+ source = oldSource; /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ *target++ = ch;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+#ifdef CVTUTF_DEBUG
+  if (result == sourceIllegal) {
+    fprintf(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2);
+    fflush(stderr);
+  }
+#endif
+ return result;
+}
+ConversionResult ConvertUTF16toUTF8 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF16* source = *sourceStart;
+ UTF8* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ unsigned short bytesToWrite = 0;
+ const UTF32 byteMask = 0xBF;
+ const UTF32 byteMark = 0x80;
+ const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
+ ch = *source++;
+ /* If we have a surrogate pair, convert to UTF32 first. */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+ /* If the 16 bits following the high surrogate are in the source buffer... */
+ if (source < sourceEnd) {
+ UTF32 ch2 = *source;
+ /* If it's a low surrogate, convert to UTF32. */
+ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+ ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+ + (ch2 - UNI_SUR_LOW_START) + halfBase;
+ ++source;
+ } else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ } else { /* We don't have the 16 bits following the high surrogate. */
+ --source; /* return to the high surrogate */
+ result = sourceExhausted;
+ break;
+ }
+ } else if (flags == strictConversion) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ /* Figure out how many bytes the result will require */
+ if (ch < (UTF32)0x80) { bytesToWrite = 1;
+ } else if (ch < (UTF32)0x800) { bytesToWrite = 2;
+ } else if (ch < (UTF32)0x10000) { bytesToWrite = 3;
+ } else if (ch < (UTF32)0x110000) { bytesToWrite = 4;
+ } else { bytesToWrite = 3;
+ ch = UNI_REPLACEMENT_CHAR;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ source = oldSource; /* Back up source pointer! */
+ target -= bytesToWrite; result = targetExhausted; break;
+ }
+ switch (bytesToWrite) { /* note: everything falls through. */
+ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 1: *--target = (UTF8)(ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF8 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF32* source = *sourceStart;
+ UTF8* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ unsigned short bytesToWrite = 0;
+ const UTF32 byteMask = 0xBF;
+ const UTF32 byteMark = 0x80;
+ ch = *source++;
+ if (flags == strictConversion ) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ /*
+ * Figure out how many bytes the result will require. Turn any
+ * illegally large UTF32 things (> Plane 17) into replacement chars.
+ */
+ if (ch < (UTF32)0x80) { bytesToWrite = 1;
+ } else if (ch < (UTF32)0x800) { bytesToWrite = 2;
+ } else if (ch < (UTF32)0x10000) { bytesToWrite = 3;
+ } else if (ch <= UNI_MAX_LEGAL_UTF32) { bytesToWrite = 4;
+ } else { bytesToWrite = 3;
+ ch = UNI_REPLACEMENT_CHAR;
+ result = sourceIllegal;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ --source; /* Back up source pointer! */
+ target -= bytesToWrite; result = targetExhausted; break;
+ }
+ switch (bytesToWrite) { /* note: everything falls through. */
+ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF32 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF8* source = *sourceStart;
+ UTF32* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch = 0;
+ unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+ if (source + extraBytesToRead >= sourceEnd) {
+ result = sourceExhausted; break;
+ }
+ /* Do this check whether lenient or strict */
+ if (!isLegalUTF8(source, extraBytesToRead+1)) {
+ result = sourceIllegal;
+ break;
+ }
+ /*
+ * The cases all fall through. See "Note A" below.
+ */
+ switch (extraBytesToRead) {
+ case 5: ch += *source++; ch <<= 6;
+ case 4: ch += *source++; ch <<= 6;
+ case 3: ch += *source++; ch <<= 6;
+ case 2: ch += *source++; ch <<= 6;
+ case 1: ch += *source++; ch <<= 6;
+ case 0: ch += *source++;
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up the source pointer! */
+ result = targetExhausted; break;
+ }
+ if (ch <= UNI_MAX_LEGAL_UTF32) {
+ /*
+ * UTF-16 surrogate values are illegal in UTF-32, and anything
+ * over Plane 17 (> 0x10FFFF) is illegal.
+ */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ source -= (extraBytesToRead+1); /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = ch;
+ }
+ } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
+ result = sourceIllegal;
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Utility routine to tell whether a sequence of bytes is legal UTF-8.
+ * This must be called with the length pre-determined by the first byte.
+ * If not calling this from ConvertUTF8to*, then the length can be set by:
+ * length = trailingBytesForUTF8[*source]+1;
+ * and the sequence is illegal right away if there aren't that many bytes
+ * available.
+ * If presented with a length > 4, this returns false. The Unicode
+ * definition of UTF-8 goes up to 4-byte sequences.
+ */
+
+static Boolean isLegalUTF8(const UTF8 *source, int length) {
+ UTF8 a;
+ const UTF8 *srcptr = source+length;
+ switch (length) {
+ default: return false;
+ /* Everything else falls through when "true"... */
+ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 2: if ((a = (*--srcptr)) > 0xBF) return false;
+
+ switch (*source) {
+ /* no fall-through in this inner switch */
+ case 0xE0: if (a < 0xA0) return false; break;
+ case 0xED: if (a > 0x9F) return false; break;
+ case 0xF0: if (a < 0x90) return false; break;
+ case 0xF4: if (a > 0x8F) return false; break;
+ default: if (a < 0x80) return false;
+ }
+
+ case 1: if (*source >= 0x80 && *source < 0xC2) return false;
+ }
+ if (*source > 0xF4) return false;
+ return true;
+}
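+/*
+ * For example, isLegalUTF8 rejects the over-long two-byte encoding C0 80 (an
+ * illegal encoding of NUL): its lead byte falls in the range [0x80, 0xC2) and
+ * fails the "case 1" test above. Any lead byte above 0xF4 fails the final
+ * check.
+ */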
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Exported function to return whether a UTF-8 sequence is legal or not.
+ * This is not used here; it's just exported.
+ */
+Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
+ int length = trailingBytesForUTF8[*source]+1;
+ if (source+length > sourceEnd) {
+ return false;
+ }
+ return isLegalUTF8(source, length);
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF16 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF8* source = *sourceStart;
+ UTF16* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch = 0;
+ unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+ if (source + extraBytesToRead >= sourceEnd) {
+ result = sourceExhausted; break;
+ }
+ /* Do this check whether lenient or strict */
+ if (!isLegalUTF8(source, extraBytesToRead+1)) {
+ result = sourceIllegal;
+ break;
+ }
+ /*
+ * The cases all fall through. See "Note A" below.
+ */
+ switch (extraBytesToRead) {
+ case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+ case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+ case 3: ch += *source++; ch <<= 6;
+ case 2: ch += *source++; ch <<= 6;
+ case 1: ch += *source++; ch <<= 6;
+ case 0: ch += *source++;
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ source -= (extraBytesToRead+1); /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = (UTF16)ch; /* normal case */
+ }
+ } else if (ch > UNI_MAX_UTF16) {
+ if (flags == strictConversion) {
+ result = sourceIllegal;
+ source -= (extraBytesToRead+1); /* return to the start */
+ break; /* Bail out; shouldn't continue */
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+      /* target is a character in range 0x10000 - 0x10FFFF. */
+ if (target + 1 >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ ch -= halfBase;
+ *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+ *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* ---------------------------------------------------------------------
+
+ Note A.
+ The fall-through switches in UTF-8 reading code save a
+ temp variable, some decrements & conditionals. The switches
+ are equivalent to the following loop:
+ {
+ int tmpBytesToRead = extraBytesToRead+1;
+ do {
+ ch += *source++;
+ --tmpBytesToRead;
+ if (tmpBytesToRead) ch <<= 6;
+ } while (tmpBytesToRead > 0);
+ }
+ In UTF-8 writing code, the switches on "bytesToWrite" are
+ similarly unrolled loops.
+
+ --------------------------------------------------------------------- */
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
new file mode 100644
index 0000000..3b3d61b
--- /dev/null
+++ b/lib/Basic/Diagnostic.cpp
@@ -0,0 +1,788 @@
+//===--- Diagnostic.cpp - C Language Family Diagnostic Handling -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Analysis/AnalysisDiagnostic.h"
+#include "clang/Driver/DriverDiagnostic.h"
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include <vector>
+#include <map>
+#include <cstring>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Builtin Diagnostic information
+//===----------------------------------------------------------------------===//
+
+// Diagnostic classes.
+enum {
+ CLASS_NOTE = 0x01,
+ CLASS_WARNING = 0x02,
+ CLASS_EXTENSION = 0x03,
+ CLASS_ERROR = 0x04
+};
+
+struct StaticDiagInfoRec {
+ unsigned short DiagID;
+ unsigned Mapping : 3;
+ unsigned Class : 3;
+ const char *Description;
+ const char *OptionGroup;
+
+ bool operator<(const StaticDiagInfoRec &RHS) const {
+ return DiagID < RHS.DiagID;
+ }
+ bool operator>(const StaticDiagInfoRec &RHS) const {
+ return DiagID > RHS.DiagID;
+ }
+};
+
+static const StaticDiagInfoRec StaticDiagInfo[] = {
+#define DIAG(ENUM,CLASS,DEFAULT_MAPPING,DESC,GROUP) \
+ { diag::ENUM, DEFAULT_MAPPING, CLASS, DESC, GROUP },
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+{ 0, 0, 0, 0, 0 }
+};
+#undef DIAG
+
+/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
+/// or null if the ID is invalid.
+static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
+ unsigned NumDiagEntries = sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
+
+ // If assertions are enabled, verify that the StaticDiagInfo array is sorted.
+#ifndef NDEBUG
+ static bool IsFirst = true;
+ if (IsFirst) {
+ for (unsigned i = 1; i != NumDiagEntries; ++i)
+ assert(StaticDiagInfo[i-1] < StaticDiagInfo[i] &&
+ "Improperly sorted diag info");
+ IsFirst = false;
+ }
+#endif
+
+ // Search the diagnostic table with a binary search.
+ StaticDiagInfoRec Find = { DiagID, 0, 0, 0, 0 };
+
+ const StaticDiagInfoRec *Found =
+ std::lower_bound(StaticDiagInfo, StaticDiagInfo + NumDiagEntries, Find);
+ if (Found == StaticDiagInfo + NumDiagEntries ||
+ Found->DiagID != DiagID)
+ return 0;
+
+ return Found;
+}
+
+static unsigned GetDefaultDiagMapping(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Mapping;
+ return diag::MAP_FATAL;
+}
+
+/// getWarningOptionForDiag - Return the lowest-level warning option that
+/// enables the specified diagnostic. If there is no -Wfoo flag that controls
+/// the diagnostic, this returns null.
+const char *Diagnostic::getWarningOptionForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->OptionGroup;
+ return 0;
+}
+
+/// getDiagClass - Return the class field of the diagnostic.
+///
+static unsigned getBuiltinDiagClass(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Class;
+ return ~0U;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ namespace diag {
+ class CustomDiagInfo {
+ typedef std::pair<Diagnostic::Level, std::string> DiagDesc;
+ std::vector<DiagDesc> DiagInfo;
+ std::map<DiagDesc, unsigned> DiagIDs;
+ public:
+
+ /// getDescription - Return the description of the specified custom
+ /// diagnostic.
+ const char *getDescription(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+             "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second.c_str();
+ }
+
+ /// getLevel - Return the level of the specified custom diagnostic.
+ Diagnostic::Level getLevel(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+             "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first;
+ }
+
+ unsigned getOrCreateDiagID(Diagnostic::Level L, const char *Message,
+ Diagnostic &Diags) {
+ DiagDesc D(L, Message);
+ // Check to see if it already exists.
+ std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
+ if (I != DiagIDs.end() && I->first == D)
+ return I->second;
+
+ // If not, assign a new ID.
+ unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT;
+ DiagIDs.insert(std::make_pair(D, ID));
+ DiagInfo.push_back(D);
+ return ID;
+ }
+ };
+
+ } // end diag namespace
+} // end clang namespace
+
+
+//===----------------------------------------------------------------------===//
+// Common Diagnostic implementation
+//===----------------------------------------------------------------------===//
+
+static void DummyArgToStringFn(Diagnostic::ArgumentKind AK, intptr_t QT,
+ const char *Modifier, unsigned ML,
+ const char *Argument, unsigned ArgLen,
+ llvm::SmallVectorImpl<char> &Output,
+ void *Cookie) {
+ const char *Str = "<can't format argument>";
+ Output.append(Str, Str+strlen(Str));
+}
+
+
+Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
+ AllExtensionsSilenced = 0;
+ IgnoreAllWarnings = false;
+ WarningsAsErrors = false;
+ SuppressSystemWarnings = false;
+ ExtBehavior = Ext_Ignore;
+
+ ErrorOccurred = false;
+ FatalErrorOccurred = false;
+ NumDiagnostics = 0;
+ NumErrors = 0;
+ CustomDiagInfo = 0;
+ CurDiagID = ~0U;
+ LastDiagLevel = Ignored;
+
+ ArgToStringFn = DummyArgToStringFn;
+ ArgToStringCookie = 0;
+
+ // Set all mappings to 'unset'.
+ memset(DiagMappings, 0, sizeof(DiagMappings));
+}
+
+Diagnostic::~Diagnostic() {
+ delete CustomDiagInfo;
+}
+
+/// getCustomDiagID - Return an ID for a diagnostic with the specified message
+/// and level. If this is the first request for this diagnosic, it is
+/// registered and created, otherwise the existing ID is returned.
+unsigned Diagnostic::getCustomDiagID(Level L, const char *Message) {
+ if (CustomDiagInfo == 0)
+ CustomDiagInfo = new diag::CustomDiagInfo();
+ return CustomDiagInfo->getOrCreateDiagID(L, Message, *this);
+}
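+// For example, calling getCustomDiagID(Warning, "my custom warning") twice
+// with the same level and message returns the same ID; CustomDiagInfo keeps a
+// map from (level, message) pairs to IDs, hands out new IDs starting at
+// DIAG_UPPER_LIMIT, and only allocates a new slot on the first request.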
+
+
+/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
+/// level of the specified diagnostic ID is a Warning or Extension.
+/// This only works on builtin diagnostics, not custom ones, and is not legal to
+/// call on NOTEs.
+bool Diagnostic::isBuiltinWarningOrExtension(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) != CLASS_ERROR;
+}
+
+/// \brief Determine whether the given built-in diagnostic ID is a
+/// Note.
+bool Diagnostic::isBuiltinNote(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) == CLASS_NOTE;
+}
+
+/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+/// ID is for an extension of some sort.
+///
+bool Diagnostic::isBuiltinExtensionDiag(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) == CLASS_EXTENSION;
+}
+
+
+/// getDescription - Given a diagnostic ID, return a description of the
+/// issue.
+const char *Diagnostic::getDescription(unsigned DiagID) const {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Description;
+ return CustomDiagInfo->getDescription(DiagID);
+}
+
+/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+Diagnostic::Level Diagnostic::getDiagnosticLevel(unsigned DiagID) const {
+ // Handle custom diagnostics, which cannot be mapped.
+ if (DiagID >= diag::DIAG_UPPER_LIMIT)
+ return CustomDiagInfo->getLevel(DiagID);
+
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ assert(DiagClass != CLASS_NOTE && "Cannot get diagnostic level of a note!");
+ return getDiagnosticLevel(DiagID, DiagClass);
+}
+
+/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+Diagnostic::Level
+Diagnostic::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass) const {
+ // Specific non-error diagnostics may be mapped to various levels from ignored
+ // to error. Errors can only be mapped to fatal.
+ Diagnostic::Level Result = Diagnostic::Fatal;
+
+ // Get the mapping information, if unset, compute it lazily.
+ unsigned MappingInfo = getDiagnosticMappingInfo((diag::kind)DiagID);
+ if (MappingInfo == 0) {
+ MappingInfo = GetDefaultDiagMapping(DiagID);
+ setDiagnosticMappingInternal(DiagID, MappingInfo, false);
+ }
+
+ switch (MappingInfo & 7) {
+ default: assert(0 && "Unknown mapping!");
+ case diag::MAP_IGNORE:
+ // Ignore this, unless this is an extension diagnostic and we're mapping
+ // them onto warnings or errors.
+ if (!isBuiltinExtensionDiag(DiagID) || // Not an extension
+ ExtBehavior == Ext_Ignore || // Extensions ignored anyway
+ (MappingInfo & 8) != 0) // User explicitly mapped it.
+ return Diagnostic::Ignored;
+ Result = Diagnostic::Warning;
+ if (ExtBehavior == Ext_Error) Result = Diagnostic::Error;
+ break;
+ case diag::MAP_ERROR:
+ Result = Diagnostic::Error;
+ break;
+ case diag::MAP_FATAL:
+ Result = Diagnostic::Fatal;
+ break;
+ case diag::MAP_WARNING:
+ // If warnings are globally mapped to ignore or error, do it.
+ if (IgnoreAllWarnings)
+ return Diagnostic::Ignored;
+
+ Result = Diagnostic::Warning;
+
+ // If this is an extension diagnostic and we're in -pedantic-errors mode,
+ // and the user didn't explicitly map it, upgrade to an error.
+ if (ExtBehavior == Ext_Error &&
+ (MappingInfo & 8) == 0 &&
+ isBuiltinExtensionDiag(DiagID))
+ Result = Diagnostic::Error;
+
+ if (WarningsAsErrors)
+ Result = Diagnostic::Error;
+ break;
+
+ case diag::MAP_WARNING_NO_WERROR:
+ // Diagnostics specified with -Wno-error=foo should be set to warnings, but
+ // not be adjusted by -Werror or -pedantic-errors.
+ Result = Diagnostic::Warning;
+
+ // If warnings are globally mapped to ignore or error, do it.
+ if (IgnoreAllWarnings)
+ return Diagnostic::Ignored;
+
+ break;
+ }
+
+ // Okay, we're about to return this as a "diagnostic to emit"; one last check:
+ // if this is any sort of extension warning and we're in an __extension__
+ // block, silence it.
+ if (AllExtensionsSilenced && isBuiltinExtensionDiag(DiagID))
+ return Diagnostic::Ignored;
+
+ return Result;
+}
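The switch above keys off the low three bits of the per-diagnostic mapping byte, with bit 3 recording that the user set the mapping explicitly. A standalone sketch of that bit layout and of how MAP_IGNORE and MAP_WARNING interact with the extension behavior (the enum values and the omission of the -Wno-error and IgnoreAllWarnings paths are simplifications for illustration):

#include <cassert>

enum Level { Ignored, Warning, Error, Fatal };
enum Mapping { MAP_IGNORE = 1, MAP_WARNING = 2, MAP_ERROR = 3, MAP_FATAL = 4 };
enum ExtBehavior { Ext_Ignore, Ext_Warn, Ext_Error };

// Classify one diagnostic given its mapping byte: low 3 bits = mapping,
// bit 3 = "user set this mapping explicitly".
static Level classify(unsigned MappingByte, bool IsExtension, ExtBehavior Ext,
                      bool WarningsAsErrors) {
  bool UserSet = (MappingByte & 8) != 0;
  switch (MappingByte & 7) {
  case MAP_IGNORE:
    // Ignored, unless it is an extension being promoted by -pedantic*.
    if (!IsExtension || Ext == Ext_Ignore || UserSet)
      return Ignored;
    return Ext == Ext_Error ? Error : Warning;
  case MAP_ERROR:  return Error;
  case MAP_FATAL:  return Fatal;
  case MAP_WARNING:
    if (Ext == Ext_Error && !UserSet && IsExtension) return Error;
    return WarningsAsErrors ? Error : Warning;
  }
  return Fatal;
}

int main() {
  // -pedantic-errors upgrades an unmapped extension warning to an error...
  assert(classify(MAP_WARNING, true, Ext_Error, false) == Error);
  // ...but an explicit user mapping (bit 3 set) is left alone.
  assert(classify(MAP_WARNING | 8, true, Ext_Error, false) == Warning);
  return 0;
}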
+
+struct WarningOption {
+ const char *Name;
+ const short *Members;
+ const char *SubGroups;
+};
+
+#define GET_DIAG_ARRAYS
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_ARRAYS
+
+// Second, the table of options, sorted by name for fast binary lookup.
+static const WarningOption OptionTable[] = {
+#define GET_DIAG_TABLE
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_TABLE
+};
+static const size_t OptionTableSize =
+  sizeof(OptionTable) / sizeof(OptionTable[0]);
+
+static bool WarningOptionCompare(const WarningOption &LHS,
+ const WarningOption &RHS) {
+ return strcmp(LHS.Name, RHS.Name) < 0;
+}
+
+static void MapGroupMembers(const WarningOption *Group, diag::Mapping Mapping,
+ Diagnostic &Diags) {
+ // Option exists, poke all the members of its diagnostic set.
+ if (const short *Member = Group->Members) {
+ for (; *Member != -1; ++Member)
+ Diags.setDiagnosticMapping(*Member, Mapping);
+ }
+
+ // Enable/disable all subgroups along with this one.
+ if (const char *SubGroups = Group->SubGroups) {
+ for (; *SubGroups != (char)-1; ++SubGroups)
+ MapGroupMembers(&OptionTable[(unsigned char)*SubGroups], Mapping, Diags);
+ }
+}
+
+/// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
+/// "unknown-pragmas") to have the specified mapping. Returns true (and
+/// ignores the request) if "Group" was unknown, false otherwise.
+bool Diagnostic::setDiagnosticGroupMapping(const char *Group,
+ diag::Mapping Map) {
+
+ WarningOption Key = { Group, 0, 0 };
+ const WarningOption *Found =
+ std::lower_bound(OptionTable, OptionTable + OptionTableSize, Key,
+ WarningOptionCompare);
+ if (Found == OptionTable + OptionTableSize ||
+ strcmp(Found->Name, Group) != 0)
+ return true; // Option not found.
+
+ MapGroupMembers(Found, Map, *this);
+ return false;
+}
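Group names are resolved with a binary search over the name-sorted OptionTable, and each hit is expanded recursively: Members is a -1-terminated list of diagnostic IDs and SubGroups a -1-terminated list of indices back into the table. A standalone sketch of that layout, with made-up data standing in for the TableGen-generated arrays:

#include <algorithm>
#include <cstdio>
#include <cstring>

struct Option {
  const char *Name;
  const short *Members;     // -1 terminated diagnostic IDs
  const signed char *Subs;  // -1 terminated indices into Table
};

static const short ExtraMembers[] = {101, 102, -1};
static const short AllMembers[]   = {100, -1};
static const signed char AllSubs[] = {1, -1};   // "all" pulls in "extra"

// Sorted by name, as the generated table is.
static const Option Table[] = {
  {"all",   AllMembers,   AllSubs},
  {"extra", ExtraMembers, 0},
};

static bool Less(const Option &L, const Option &R) {
  return std::strcmp(L.Name, R.Name) < 0;
}

static void mapGroup(const Option *G) {
  if (const short *M = G->Members)
    for (; *M != -1; ++M) std::printf("map diag %d\n", *M);
  if (const signed char *S = G->Subs)
    for (; *S != -1; ++S) mapGroup(&Table[(unsigned char)*S]);
}

int main() {
  Option Key = {"all", 0, 0};
  const Option *End = Table + sizeof(Table) / sizeof(Table[0]);
  const Option *Found = std::lower_bound(Table, End, Key, Less);
  if (Found != End && !std::strcmp(Found->Name, "all"))
    mapGroup(Found);   // prints 100, then 101 and 102 via the subgroup
  return 0;
}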
+
+
+/// ProcessDiag - This is the method used to report a diagnostic that is
+/// finally fully formed.
+void Diagnostic::ProcessDiag() {
+ DiagnosticInfo Info(this);
+
+ // Figure out the diagnostic level of this message.
+ Diagnostic::Level DiagLevel;
+ unsigned DiagID = Info.getID();
+
+ // ShouldEmitInSystemHeader - True if this diagnostic should be produced even
+ // in a system header.
+ bool ShouldEmitInSystemHeader;
+
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ // Handle custom diagnostics, which cannot be mapped.
+ DiagLevel = CustomDiagInfo->getLevel(DiagID);
+
+ // Custom diagnostics always are emitted in system headers.
+ ShouldEmitInSystemHeader = true;
+ } else {
+ // Get the class of the diagnostic. If this is a NOTE, map it onto whatever
+ // the diagnostic level was for the previous diagnostic so that it is
+ // filtered the same as the previous diagnostic.
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ if (DiagClass == CLASS_NOTE) {
+ DiagLevel = Diagnostic::Note;
+ ShouldEmitInSystemHeader = false; // extra consideration is needed
+ } else {
+ // If this is not an error and we are in a system header, we ignore it.
+ // Check the original Diag ID here, because we also want to ignore
+ // extensions and warnings in -Werror and -pedantic-errors modes, which
+ // *map* warnings/extensions to errors.
+ ShouldEmitInSystemHeader = DiagClass == CLASS_ERROR;
+
+ DiagLevel = getDiagnosticLevel(DiagID, DiagClass);
+ }
+ }
+
+ if (DiagLevel != Diagnostic::Note) {
+ // Record that a fatal error occurred only when we see a second
+ // non-note diagnostic. This allows notes to be attached to the
+ // fatal error, but suppresses any diagnostics that follow those
+ // notes.
+ if (LastDiagLevel == Diagnostic::Fatal)
+ FatalErrorOccurred = true;
+
+ LastDiagLevel = DiagLevel;
+ }
+
+ // If a fatal error has already been emitted, silence all subsequent
+ // diagnostics.
+ if (FatalErrorOccurred)
+ return;
+
+ // If the client doesn't care about this message, don't issue it. If this is
+ // a note and the last real diagnostic was ignored, ignore it too.
+ if (DiagLevel == Diagnostic::Ignored ||
+ (DiagLevel == Diagnostic::Note && LastDiagLevel == Diagnostic::Ignored))
+ return;
+
+ // If this diagnostic is in a system header and is not a clang error, suppress
+ // it.
+ if (SuppressSystemWarnings && !ShouldEmitInSystemHeader &&
+ Info.getLocation().isValid() &&
+ Info.getLocation().getSpellingLoc().isInSystemHeader() &&
+ (DiagLevel != Diagnostic::Note || LastDiagLevel == Diagnostic::Ignored)) {
+ LastDiagLevel = Diagnostic::Ignored;
+ return;
+ }
+
+ if (DiagLevel >= Diagnostic::Error) {
+ ErrorOccurred = true;
+ ++NumErrors;
+ }
+
+ // Finally, report it.
+ Client->HandleDiagnostic(DiagLevel, Info);
+ if (Client->IncludeInDiagnosticCounts()) ++NumDiagnostics;
+
+ CurDiagID = ~0U;
+}
+
+
+DiagnosticClient::~DiagnosticClient() {}
+
+
+/// ModifierIs - Return true if the specified modifier matches specified string.
+template <std::size_t StrLen>
+static bool ModifierIs(const char *Modifier, unsigned ModifierLen,
+ const char (&Str)[StrLen]) {
+ return StrLen-1 == ModifierLen && !memcmp(Modifier, Str, StrLen-1);
+}
+
+/// HandleSelectModifier - Handle the integer 'select' modifier. This is used
+/// like this: %select{foo|bar|baz}2. This means that the integer argument
+/// "%2" has a value from 0-2. If the value is 0, the diagnostic prints 'foo'.
+/// If the value is 1, it prints 'bar'. If it has the value 2, it prints 'baz'.
+/// This is very useful for certain classes of variant diagnostics.
+static void HandleSelectModifier(unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ llvm::SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument+ArgumentLen;
+
+ // Skip over 'ValNo' |'s.
+ while (ValNo) {
+ const char *NextVal = std::find(Argument, ArgumentEnd, '|');
+ assert(NextVal != ArgumentEnd && "Value for integer select modifier was"
+ " larger than the number of options in the diagnostic string!");
+ Argument = NextVal+1; // Skip this string.
+ --ValNo;
+ }
+
+ // Get the end of the value. This is either the } or the |.
+ const char *EndPtr = std::find(Argument, ArgumentEnd, '|');
+ // Add the value to the output string.
+ OutStr.append(Argument, EndPtr);
+}
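A standalone sketch of the %select scan above: skip ValNo '|' separators, then copy up to the next '|' or the end of the brace argument (the sample strings are illustrative, not from any clang diagnostic):

#include <algorithm>
#include <cassert>
#include <string>

// Return alternative ValNo from a '|'-separated list such as "foo|bar|baz".
static std::string selectAlt(unsigned ValNo, const std::string &Arg) {
  std::string::const_iterator Cur = Arg.begin(), End = Arg.end();
  while (ValNo) {                       // skip ValNo separators
    Cur = std::find(Cur, End, '|');
    assert(Cur != End && "select value larger than number of options");
    ++Cur;
    --ValNo;
  }
  return std::string(Cur, std::find(Cur, End, '|'));
}

int main() {
  // %select{struct|union|class}0 with argument 0 equal to 2 prints "class".
  assert(selectAlt(0, "struct|union|class") == "struct");
  assert(selectAlt(2, "struct|union|class") == "class");
  return 0;
}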
+
+/// HandleIntegerSModifier - Handle the integer 's' modifier. This adds the
+/// letter 's' to the string if the value is not 1. This is used in cases like
+/// this: "you idiot, you have %4 parameter%s4!".
+static void HandleIntegerSModifier(unsigned ValNo,
+ llvm::SmallVectorImpl<char> &OutStr) {
+ if (ValNo != 1)
+ OutStr.push_back('s');
+}
+
+
+/// PluralNumber - Parse an unsigned integer and advance Start.
+static unsigned PluralNumber(const char *&Start, const char *End) {
+ // Programming 101: Parse a decimal number :-)
+ unsigned Val = 0;
+ while (Start != End && *Start >= '0' && *Start <= '9') {
+ Val *= 10;
+ Val += *Start - '0';
+ ++Start;
+ }
+ return Val;
+}
+
+/// TestPluralRange - Test if Val is in the parsed range. Modifies Start.
+static bool TestPluralRange(unsigned Val, const char *&Start, const char *End) {
+ if (*Start != '[') {
+ unsigned Ref = PluralNumber(Start, End);
+ return Ref == Val;
+ }
+
+ ++Start;
+ unsigned Low = PluralNumber(Start, End);
+ assert(*Start == ',' && "Bad plural expression syntax: expected ,");
+ ++Start;
+ unsigned High = PluralNumber(Start, End);
+ assert(*Start == ']' && "Bad plural expression syntax: expected ]");
+ ++Start;
+ return Low <= Val && Val <= High;
+}
+
+/// EvalPluralExpr - Actual expression evaluator for HandlePluralModifier.
+static bool EvalPluralExpr(unsigned ValNo, const char *Start, const char *End) {
+ // Empty condition?
+ if (*Start == ':')
+ return true;
+
+ while (1) {
+ char C = *Start;
+ if (C == '%') {
+ // Modulo expression
+ ++Start;
+ unsigned Arg = PluralNumber(Start, End);
+ assert(*Start == '=' && "Bad plural expression syntax: expected =");
+ ++Start;
+ unsigned ValMod = ValNo % Arg;
+ if (TestPluralRange(ValMod, Start, End))
+ return true;
+ } else {
+ assert((C == '[' || (C >= '0' && C <= '9')) &&
+ "Bad plural expression syntax: unexpected character");
+ // Range expression
+ if (TestPluralRange(ValNo, Start, End))
+ return true;
+ }
+
+ // Scan for next or-expr part.
+ Start = std::find(Start, End, ',');
+ if(Start == End)
+ break;
+ ++Start;
+ }
+ return false;
+}
+
+/// HandlePluralModifier - Handle the integer 'plural' modifier. This is used
+/// for complex plural forms, or in languages where all plurals are complex.
+/// The syntax is: %plural{cond1:form1|cond2:form2|:form3}, where condn are
+/// conditions that are tested in order, the form corresponding to the first
+/// that applies being emitted. The empty condition is always true, making the
+/// last form a default case.
+/// Conditions are simple boolean expressions, where n is the number argument.
+/// Here are the rules.
+/// condition := expression | empty
+/// empty := -> always true
+/// expression := numeric [',' expression] -> logical or
+/// numeric := range -> true if n in range
+/// | '%' number '=' range -> true if n % number in range
+/// range := number
+/// | '[' number ',' number ']' -> ranges are inclusive both ends
+///
+/// Here are some examples from the GNU gettext manual written in this form:
+/// English:
+/// {1:form0|:form1}
+/// Latvian:
+/// {0:form2|%100=11,%10=0,%10=[2,9]:form1|:form0}
+/// Gaeilge:
+/// {1:form0|2:form1|:form2}
+/// Romanian:
+/// {1:form0|0,%100=[1,19]:form1|:form2}
+/// Lithuanian:
+/// {%10=0,%100=[10,19]:form2|%10=1:form0|:form1}
+/// Russian (requires repeated form):
+/// {%100=[11,14]:form2|%10=1:form0|%10=[2,4]:form1|:form2}
+/// Slovak
+/// {1:form0|[2,4]:form1|:form2}
+/// Polish (requires repeated form):
+/// {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}
+static void HandlePluralModifier(unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ llvm::SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ while (1) {
+ assert(Argument < ArgumentEnd && "Plural expression didn't match.");
+ const char *ExprEnd = Argument;
+ while (*ExprEnd != ':') {
+ assert(ExprEnd != ArgumentEnd && "Plural missing expression end");
+ ++ExprEnd;
+ }
+ if (EvalPluralExpr(ValNo, Argument, ExprEnd)) {
+ Argument = ExprEnd + 1;
+ ExprEnd = std::find(Argument, ArgumentEnd, '|');
+ OutStr.append(Argument, ExprEnd);
+ return;
+ }
+ Argument = std::find(Argument, ArgumentEnd - 1, '|') + 1;
+ }
+}
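As a worked example of the grammar above, the Polish spec {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2} can be mirrored directly by an if/else chain; the sketch below is illustrative code (not clang) that checks a few values against the conditions in order:

#include <cassert>

// Mirror of {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}:
// conditions are tested in order, and the empty condition always matches.
static int polishForm(unsigned n) {
  if (n == 1)                          return 0; // "1:form0"
  if (n % 100 >= 10 && n % 100 <= 20)  return 2; // "%100=[10,20]:form2"
  if (n % 10 >= 2 && n % 10 <= 4)      return 1; // "%10=[2,4]:form1"
  return 2;                                      // ":form2" (default)
}

int main() {
  assert(polishForm(1) == 0);   // 1 -> first form
  assert(polishForm(2) == 1);   // 2 -> second form
  assert(polishForm(12) == 2);  // teens take the third form
  assert(polishForm(22) == 1);  // 22 -> second form again
  assert(polishForm(25) == 2);  // 25 -> third form
  return 0;
}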
+
+
+/// FormatDiagnostic - Format this diagnostic into a string, substituting the
+/// formal arguments into the %0 slots. The result is appended onto the Str
+/// array.
+void DiagnosticInfo::
+FormatDiagnostic(llvm::SmallVectorImpl<char> &OutStr) const {
+ const char *DiagStr = getDiags()->getDescription(getID());
+ const char *DiagEnd = DiagStr+strlen(DiagStr);
+
+ while (DiagStr != DiagEnd) {
+ if (DiagStr[0] != '%') {
+ // Append non-%0 substrings to Str if we have one.
+ const char *StrEnd = std::find(DiagStr, DiagEnd, '%');
+ OutStr.append(DiagStr, StrEnd);
+ DiagStr = StrEnd;
+ continue;
+ } else if (DiagStr[1] == '%') {
+ OutStr.push_back('%'); // %% -> %.
+ DiagStr += 2;
+ continue;
+ }
+
+ // Skip the %.
+ ++DiagStr;
+
+ // This must be a placeholder for a diagnostic argument. The format for a
+ // placeholder is one of "%0", "%modifier0", or "%modifier{arguments}0".
+ // The digit is a number from 0-9 indicating which argument this comes from.
+ // The modifier is a string of characters from the set [-a-z], and the
+ // argument is a brace-enclosed string.
+ const char *Modifier = 0, *Argument = 0;
+ unsigned ModifierLen = 0, ArgumentLen = 0;
+
+ // Check to see if we have a modifier. If so eat it.
+ if (!isdigit(DiagStr[0])) {
+ Modifier = DiagStr;
+ while (DiagStr[0] == '-' ||
+ (DiagStr[0] >= 'a' && DiagStr[0] <= 'z'))
+ ++DiagStr;
+ ModifierLen = DiagStr-Modifier;
+
+ // If we have an argument, get it next.
+ if (DiagStr[0] == '{') {
+ ++DiagStr; // Skip {.
+ Argument = DiagStr;
+
+ for (; DiagStr[0] != '}'; ++DiagStr)
+ assert(DiagStr[0] && "Mismatched {}'s in diagnostic string!");
+ ArgumentLen = DiagStr-Argument;
+ ++DiagStr; // Skip }.
+ }
+ }
+
+ assert(isdigit(*DiagStr) && "Invalid format for argument in diagnostic");
+ unsigned ArgNo = *DiagStr++ - '0';
+
+ switch (getArgKind(ArgNo)) {
+ // ---- STRINGS ----
+ case Diagnostic::ak_std_string: {
+ const std::string &S = getArgStdStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+ OutStr.append(S.begin(), S.end());
+ break;
+ }
+ case Diagnostic::ak_c_string: {
+ const char *S = getArgCStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+ // Don't crash if get passed a null pointer by accident.
+ if (!S)
+ S = "(null)";
+
+ OutStr.append(S, S + strlen(S));
+ break;
+ }
+ // ---- INTEGERS ----
+ case Diagnostic::ak_sint: {
+ int Val = getArgSInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier((unsigned)Val, Argument, ArgumentLen, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier((unsigned)Val, Argument, ArgumentLen, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+ // FIXME: Optimize
+ std::string S = llvm::itostr(Val);
+ OutStr.append(S.begin(), S.end());
+ }
+ break;
+ }
+ case Diagnostic::ak_uint: {
+ unsigned Val = getArgUInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier(Val, Argument, ArgumentLen, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier((unsigned)Val, Argument, ArgumentLen, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+
+ // FIXME: Optimize
+ std::string S = llvm::utostr_32(Val);
+ OutStr.append(S.begin(), S.end());
+ }
+ break;
+ }
+ // ---- NAMES and TYPES ----
+ case Diagnostic::ak_identifierinfo: {
+ const IdentifierInfo *II = getArgIdentifier(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+ // Don't crash if get passed a null pointer by accident.
+ if (!II) {
+ const char *S = "(null)";
+ OutStr.append(S, S + strlen(S));
+ continue;
+ }
+
+ OutStr.push_back('\'');
+ OutStr.append(II->getName(), II->getName() + II->getLength());
+ OutStr.push_back('\'');
+ break;
+ }
+ case Diagnostic::ak_qualtype:
+ case Diagnostic::ak_declarationname:
+ case Diagnostic::ak_nameddecl:
+ getDiags()->ConvertArgToString(getArgKind(ArgNo), getRawArg(ArgNo),
+ Modifier, ModifierLen,
+ Argument, ArgumentLen, OutStr);
+ break;
+ }
+ }
+}
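A standalone sketch of the placeholder grammar the loop above consumes, "%[modifier[{argument}]]digit": peel off the optional modifier letters, the optional brace argument, and the trailing argument index (the input string is illustrative):

#include <cassert>
#include <cctype>
#include <string>

struct Placeholder { std::string Modifier, Argument; unsigned ArgNo; };

// Parse the text that follows a '%', e.g. "select{int|float}0".
static Placeholder parsePlaceholder(const std::string &S) {
  Placeholder P;
  std::string::size_type i = 0;
  while (i < S.size() && (S[i] == '-' || (S[i] >= 'a' && S[i] <= 'z')))
    P.Modifier += S[i++];                       // modifier is [-a-z]*
  if (i < S.size() && S[i] == '{') {            // optional {argument}
    std::string::size_type Close = S.find('}', ++i);
    assert(Close != std::string::npos && "mismatched {} in placeholder");
    P.Argument = S.substr(i, Close - i);
    i = Close + 1;
  }
  assert(i < S.size() && isdigit((unsigned char)S[i]) &&
         "placeholder must end in an argument digit");
  P.ArgNo = S[i] - '0';
  return P;
}

int main() {
  Placeholder P = parsePlaceholder("select{int|float}0");
  assert(P.Modifier == "select" && P.Argument == "int|float" && P.ArgNo == 0);
  return 0;
}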
+
+/// IncludeInDiagnosticCounts - This method (whose default implementation
+/// returns true) indicates whether the diagnostics handled by this
+/// DiagnosticClient should be included in the number of diagnostics
+/// reported by Diagnostic.
+bool DiagnosticClient::IncludeInDiagnosticCounts() const { return true; }
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
new file mode 100644
index 0000000..cc25d33
--- /dev/null
+++ b/lib/Basic/FileManager.cpp
@@ -0,0 +1,302 @@
+//===--- FileManager.cpp - File System Probing and Caching ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the FileManager interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: This should index all interesting directories with dirent calls.
+// getdirentries ?
+// opendir/readdir_r/closedir ?
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/System/Path.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Config/config.h"
+using namespace clang;
+
+// FIXME: Enhance libsystem to support inode and other fields.
+#include <sys/stat.h>
+
+#if defined(_MSC_VER)
+#define S_ISDIR(s) (_S_IFDIR & s)
+#endif
+
+/// NON_EXISTENT_DIR - A special value distinct from null that is used to
+/// represent a dir name that doesn't exist on the disk.
+#define NON_EXISTENT_DIR reinterpret_cast<DirectoryEntry*>((intptr_t)-1)
+
+//===----------------------------------------------------------------------===//
+// Windows.
+//===----------------------------------------------------------------------===//
+
+#ifdef LLVM_ON_WIN32
+
+#define IS_DIR_SEPARATOR_CHAR(x) ((x) == '/' || (x) == '\\')
+
+namespace {
+ static std::string GetFullPath(const char *relPath)
+ {
+ char *absPathStrPtr = _fullpath(NULL, relPath, 0);
+ assert(absPathStrPtr && "_fullpath() returned NULL!");
+
+ std::string absPath(absPathStrPtr);
+
+ free(absPathStrPtr);
+ return absPath;
+ }
+}
+
+class FileManager::UniqueDirContainer {
+ /// UniqueDirs - Cache from full path to existing directories/files.
+ ///
+ llvm::StringMap<DirectoryEntry> UniqueDirs;
+
+public:
+ DirectoryEntry &getDirectory(const char *Name, struct stat &StatBuf) {
+ std::string FullPath(GetFullPath(Name));
+ return UniqueDirs.GetOrCreateValue(
+ FullPath.c_str(),
+ FullPath.c_str() + FullPath.size()
+ ).getValue();
+ }
+
+ size_t size() { return UniqueDirs.size(); }
+};
+
+class FileManager::UniqueFileContainer {
+ /// UniqueFiles - Cache from full path to existing files.
+ ///
+ llvm::StringMap<FileEntry, llvm::BumpPtrAllocator> UniqueFiles;
+
+public:
+ FileEntry &getFile(const char *Name, struct stat &StatBuf) {
+ std::string FullPath(GetFullPath(Name));
+ return UniqueFiles.GetOrCreateValue(
+ FullPath.c_str(),
+ FullPath.c_str() + FullPath.size()
+ ).getValue();
+ }
+
+ size_t size() { return UniqueFiles.size(); }
+};
+
+//===----------------------------------------------------------------------===//
+// Unix-like Systems.
+//===----------------------------------------------------------------------===//
+
+#else
+
+#define IS_DIR_SEPARATOR_CHAR(x) ((x) == '/')
+
+class FileManager::UniqueDirContainer {
+ /// UniqueDirs - Cache from (dev, inode) IDs to existing directories.
+ ///
+ std::map<std::pair<dev_t, ino_t>, DirectoryEntry> UniqueDirs;
+
+public:
+ DirectoryEntry &getDirectory(const char *Name, struct stat &StatBuf) {
+ return UniqueDirs[std::make_pair(StatBuf.st_dev, StatBuf.st_ino)];
+ }
+
+ size_t size() { return UniqueDirs.size(); }
+};
+
+class FileManager::UniqueFileContainer {
+ /// UniqueFiles - Cache from (dev, inode) IDs to existing files.
+ ///
+ std::set<FileEntry> UniqueFiles;
+
+public:
+ FileEntry &getFile(const char *Name, struct stat &StatBuf) {
+ return
+ const_cast<FileEntry&>(
+ *UniqueFiles.insert(FileEntry(StatBuf.st_dev,
+ StatBuf.st_ino,
+ StatBuf.st_mode)).first);
+ }
+
+ size_t size() { return UniqueFiles.size(); }
+};
+
+#endif
+
+//===----------------------------------------------------------------------===//
+// Common logic.
+//===----------------------------------------------------------------------===//
+
+FileManager::FileManager()
+ : UniqueDirs(*new UniqueDirContainer),
+ UniqueFiles(*new UniqueFileContainer),
+ DirEntries(64), FileEntries(64), NextFileUID(0) {
+ NumDirLookups = NumFileLookups = 0;
+ NumDirCacheMisses = NumFileCacheMisses = 0;
+}
+
+FileManager::~FileManager() {
+ delete &UniqueDirs;
+ delete &UniqueFiles;
+}
+
+/// getDirectory - Lookup, cache, and verify the specified directory. This
+/// returns null if the directory doesn't exist.
+///
+const DirectoryEntry *FileManager::getDirectory(const char *NameStart,
+ const char *NameEnd) {
+ ++NumDirLookups;
+ llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
+ DirEntries.GetOrCreateValue(NameStart, NameEnd);
+
+ // See if there is already an entry in the map.
+ if (NamedDirEnt.getValue())
+ return NamedDirEnt.getValue() == NON_EXISTENT_DIR
+ ? 0 : NamedDirEnt.getValue();
+
+ ++NumDirCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedDirEnt.setValue(NON_EXISTENT_DIR);
+
+ // Get the null-terminated directory name as stored as the key of the
+ // DirEntries map.
+ const char *InterndDirName = NamedDirEnt.getKeyData();
+
+ // Check to see if the directory exists.
+ struct stat StatBuf;
+ if (stat_cached(InterndDirName, &StatBuf) || // Error stat'ing.
+ !S_ISDIR(StatBuf.st_mode)) // Not a directory?
+ return 0;
+
+ // It exists. See if we have already opened a directory with the same inode.
+ // This occurs when one dir is symlinked to another, for example.
+ DirectoryEntry &UDE = UniqueDirs.getDirectory(InterndDirName, StatBuf);
+
+ NamedDirEnt.setValue(&UDE);
+ if (UDE.getName()) // Already have an entry with this inode, return it.
+ return &UDE;
+
+ // Otherwise, we don't have this directory yet, add it. We use the string
+ // key from the DirEntries map as the string.
+ UDE.Name = InterndDirName;
+ return &UDE;
+}
+
+/// NON_EXISTENT_FILE - A special value distinct from null that is used to
+/// represent a filename that doesn't exist on the disk.
+#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
+
+/// getFile - Lookup, cache, and verify the specified file. This returns null
+/// if the file doesn't exist.
+///
+const FileEntry *FileManager::getFile(const char *NameStart,
+ const char *NameEnd) {
+ ++NumFileLookups;
+
+ // See if there is already an entry in the map.
+ llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
+ FileEntries.GetOrCreateValue(NameStart, NameEnd);
+
+ // See if there is already an entry in the map.
+ if (NamedFileEnt.getValue())
+ return NamedFileEnt.getValue() == NON_EXISTENT_FILE
+ ? 0 : NamedFileEnt.getValue();
+
+ ++NumFileCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedFileEnt.setValue(NON_EXISTENT_FILE);
+
+ // Figure out what directory it is in. If the string contains a / in it,
+ // strip off everything after it.
+ // FIXME: this logic should be in sys::Path.
+ const char *SlashPos = NameEnd-1;
+ while (SlashPos >= NameStart && !IS_DIR_SEPARATOR_CHAR(SlashPos[0]))
+ --SlashPos;
+
+ const DirectoryEntry *DirInfo;
+ if (SlashPos < NameStart) {
+ // Use the current directory if file has no path component.
+ const char *Name = ".";
+ DirInfo = getDirectory(Name, Name+1);
+ } else if (SlashPos == NameEnd-1)
+ return 0; // If filename ends with a /, it's a directory.
+ else
+ DirInfo = getDirectory(NameStart, SlashPos);
+
+ if (DirInfo == 0) // Directory doesn't exist, file can't exist.
+ return 0;
+
+ // Get the null-terminated file name as stored as the key of the
+ // FileEntries map.
+ const char *InterndFileName = NamedFileEnt.getKeyData();
+
+ // FIXME: Use the directory info to prune this before doing the stat syscall;
+ // that would reduce the number of syscalls.
+
+ // Nope, there isn't. Check to see if the file exists.
+ struct stat StatBuf;
+ //llvm::cerr << "STATING: " << Filename;
+ if (stat_cached(InterndFileName, &StatBuf) || // Error stat'ing.
+ S_ISDIR(StatBuf.st_mode)) { // A directory?
+ // If this file doesn't exist, we leave a null in FileEntries for this path.
+ //llvm::cerr << ": Not existing\n";
+ return 0;
+ }
+ //llvm::cerr << ": exists\n";
+
+ // It exists. See if we have already opened a file with the same inode.
+ // This occurs when one dir is symlinked to another, for example.
+ FileEntry &UFE = UniqueFiles.getFile(InterndFileName, StatBuf);
+
+ NamedFileEnt.setValue(&UFE);
+ if (UFE.getName()) // Already have an entry with this inode, return it.
+ return &UFE;
+
+ // Otherwise, we don't have this file yet, add it.
+ // FIXME: Change the name to be a char* that points back to the 'FileEntries'
+ // key.
+ UFE.Name = InterndFileName;
+ UFE.Size = StatBuf.st_size;
+ UFE.ModTime = StatBuf.st_mtime;
+ UFE.Dir = DirInfo;
+ UFE.UID = NextFileUID++;
+ return &UFE;
+}
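The lookup above is two-level: the by-name map caches both hits and misses (the NON_EXISTENT_FILE sentinel marks a miss), and the by-inode container lets two names that stat to the same file share a single entry. A standalone POSIX-only sketch of that pattern on the Unix side (the Entry type, map choices, and paths are illustrative):

#include <sys/stat.h>
#include <stdint.h>
#include <map>
#include <string>
#include <utility>

struct Entry { std::string Name; off_t Size; };

static std::map<std::string, Entry*> ByName;              // name -> entry or sentinel
static std::map<std::pair<dev_t, ino_t>, Entry> ByInode;  // inode -> unique entry
static Entry *const kMissing = reinterpret_cast<Entry*>(intptr_t(-1));

static Entry *lookupFile(const std::string &Name) {
  std::map<std::string, Entry*>::iterator I = ByName.find(Name);
  if (I != ByName.end())
    return I->second == kMissing ? 0 : I->second;          // cached hit or miss

  struct stat SB;
  if (::stat(Name.c_str(), &SB) != 0 || S_ISDIR(SB.st_mode)) {
    ByName[Name] = kMissing;                               // remember the failure
    return 0;
  }

  // Symlinked or hard-linked names collapse onto one Entry here.
  Entry &E = ByInode[std::make_pair(SB.st_dev, SB.st_ino)];
  if (E.Name.empty()) { E.Name = Name; E.Size = SB.st_size; }
  ByName[Name] = &E;
  return &E;
}

int main() {
  lookupFile("/etc/hosts");      // populates both maps (if the file exists)
  lookupFile("/no/such/file");   // caches a negative result
  return 0;
}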
+
+void FileManager::PrintStats() const {
+ llvm::cerr << "\n*** File Manager Stats:\n";
+ llvm::cerr << UniqueFiles.size() << " files found, "
+ << UniqueDirs.size() << " dirs found.\n";
+ llvm::cerr << NumDirLookups << " dir lookups, "
+ << NumDirCacheMisses << " dir cache misses.\n";
+ llvm::cerr << NumFileLookups << " file lookups, "
+ << NumFileCacheMisses << " file cache misses.\n";
+
+ //llvm::cerr << PagesMapped << BytesOfPagesMapped << FSLookups;
+}
+
+int MemorizeStatCalls::stat(const char *path, struct stat *buf) {
+ int result = ::stat(path, buf);
+
+ if (result != 0) {
+ // Cache failed 'stat' results.
+ struct stat empty;
+ StatCalls[path] = StatResult(result, empty);
+ }
+ else if (!S_ISDIR(buf->st_mode) || llvm::sys::Path(path).isAbsolute()) {
+ // Cache 'stat' results for files, and for directories named by an
+ // absolute path.
+ StatCalls[path] = StatResult(result, *buf);
+ }
+
+ return result;
+}
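The policy above caches every failed stat, but caches a successful result only for regular files or for directories named by an absolute path, presumably because a relative directory result could go stale if the working directory changes. A small standalone sketch of the same wrapper (the map type and the isAbsolute helper are illustrative stand-ins, not llvm::sys::Path):

#include <sys/stat.h>
#include <map>
#include <string>
#include <utility>

typedef std::pair<int, struct stat> StatResult;
static std::map<std::string, StatResult> StatCalls;

static bool isAbsolute(const std::string &P) { return !P.empty() && P[0] == '/'; }

// Record a stat call, mirroring the caching policy above.
static int recordingStat(const char *Path, struct stat *Buf) {
  int Result = ::stat(Path, Buf);
  if (Result != 0) {
    struct stat Empty = {};                      // cache the failure
    StatCalls[Path] = StatResult(Result, Empty);
  } else if (!S_ISDIR(Buf->st_mode) || isAbsolute(Path)) {
    StatCalls[Path] = StatResult(Result, *Buf);  // file, or absolute-path dir
  }
  return Result;
}

int main() {
  struct stat SB;
  recordingStat("/etc/hosts", &SB);
  recordingStat("some-relative-dir", &SB);       // a success here is not cached
  return 0;
}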
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
new file mode 100644
index 0000000..cf78da98
--- /dev/null
+++ b/lib/Basic/IdentifierTable.cpp
@@ -0,0 +1,388 @@
+//===--- IdentifierTable.cpp - Hash table for identifier lookup -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierInfo, IdentifierVisitor, and
+// IdentifierTable interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include <cstdio>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdentifierInfo Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo::IdentifierInfo() {
+ TokenID = tok::identifier;
+ ObjCOrBuiltinID = 0;
+ HasMacro = false;
+ IsExtension = false;
+ IsPoisoned = false;
+ IsCPPOperatorKeyword = false;
+ NeedsHandleIdentifier = false;
+ FETokenInfo = 0;
+ Entry = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// IdentifierTable Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierInfoLookup::~IdentifierInfoLookup() {}
+
+ExternalIdentifierLookup::~ExternalIdentifierLookup() {}
+
+IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
+ IdentifierInfoLookup* externalLookup)
+ : HashTable(8192), // Start with space for 8K identifiers.
+ ExternalLookup(externalLookup) {
+
+ // Populate the identifier table with info about keywords for the current
+ // language.
+ AddKeywords(LangOpts);
+}
+
+//===----------------------------------------------------------------------===//
+// Language Keyword Implementation
+//===----------------------------------------------------------------------===//
+
+// Constants for TokenKinds.def
+namespace {
+ enum {
+ KEYALL = 1,
+ KEYC99 = 2,
+ KEYCXX = 4,
+ KEYCXX0X = 8,
+ KEYGNU = 16,
+ KEYMS = 32
+ };
+}
+
+/// AddKeyword - This method is used to associate a token ID with specific
+/// identifiers because they are language keywords. This causes the lexer to
+/// automatically map matching identifiers to specialized token codes.
+///
+/// The Flags bitmask (KEYALL, KEYC99, KEYCXX, ...) selects the languages in
+/// which the identifier is a keyword. Depending on the active LangOptions, the
+/// keyword is left disabled, registered as an extension token, or registered
+/// as an ordinary keyword.
+static void AddKeyword(const char *Keyword, unsigned KWLen,
+ tok::TokenKind TokenCode, unsigned Flags,
+ const LangOptions &LangOpts, IdentifierTable &Table) {
+ unsigned AddResult = 0;
+ if (Flags & KEYALL) AddResult = 2;
+ else if (LangOpts.CPlusPlus && (Flags & KEYCXX)) AddResult = 2;
+ else if (LangOpts.CPlusPlus0x && (Flags & KEYCXX0X)) AddResult = 2;
+ else if (LangOpts.C99 && (Flags & KEYC99)) AddResult = 2;
+ else if (LangOpts.GNUMode && (Flags & KEYGNU)) AddResult = 1;
+ else if (LangOpts.Microsoft && (Flags & KEYMS)) AddResult = 1;
+
+ // Don't add this keyword if disabled in this language.
+ if (AddResult == 0) return;
+
+ IdentifierInfo &Info = Table.get(Keyword, Keyword+KWLen);
+ Info.setTokenID(TokenCode);
+ Info.setIsExtensionToken(AddResult == 1);
+}
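A standalone sketch of the flag test above: the keyword's bitmask is matched against the active language options to decide between "not a keyword", "extension keyword", and "ordinary keyword" (the enum values mirror the ones defined earlier in this file; the Lang struct and sample options are illustrative):

#include <cassert>

enum { KEYALL = 1, KEYC99 = 2, KEYCXX = 4, KEYCXX0X = 8, KEYGNU = 16, KEYMS = 32 };
struct Lang { bool CPlusPlus, CPlusPlus0x, C99, GNUMode, Microsoft; };

// 0 = not a keyword here, 1 = extension keyword, 2 = regular keyword.
static unsigned keywordStatus(unsigned Flags, const Lang &L) {
  if (Flags & KEYALL)                      return 2;
  if (L.CPlusPlus   && (Flags & KEYCXX))   return 2;
  if (L.CPlusPlus0x && (Flags & KEYCXX0X)) return 2;
  if (L.C99         && (Flags & KEYC99))   return 2;
  if (L.GNUMode     && (Flags & KEYGNU))   return 1;
  if (L.Microsoft   && (Flags & KEYMS))    return 1;
  return 0;
}

int main() {
  Lang C89   = {false, false, false, false, false};
  Lang Gnu99 = {false, false, true,  true,  false};
  assert(keywordStatus(KEYC99, C89)   == 0);  // C99-only keywords vanish in C89
  assert(keywordStatus(KEYC99, Gnu99) == 2);  // regular keyword in C99
  assert(keywordStatus(KEYGNU, Gnu99) == 1);  // GNU keywords are extensions
  return 0;
}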
+
+/// AddCXXOperatorKeyword - Register a C++ operator keyword alternative
+/// representations.
+static void AddCXXOperatorKeyword(const char *Keyword, unsigned KWLen,
+ tok::TokenKind TokenCode,
+ IdentifierTable &Table) {
+ IdentifierInfo &Info = Table.get(Keyword, Keyword + KWLen);
+ Info.setTokenID(TokenCode);
+ Info.setIsCPlusPlusOperatorKeyword();
+}
+
+/// AddObjCKeyword - Register an Objective-C @keyword like "class", "selector",
+/// or "property".
+static void AddObjCKeyword(tok::ObjCKeywordKind ObjCID,
+ const char *Name, unsigned NameLen,
+ IdentifierTable &Table) {
+ Table.get(Name, Name+NameLen).setObjCKeywordID(ObjCID);
+}
+
+/// AddKeywords - Add all keywords to the symbol table.
+///
+void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
+ // Add keywords and tokens for the current language.
+#define KEYWORD(NAME, FLAGS) \
+ AddKeyword(#NAME, strlen(#NAME), tok::kw_ ## NAME, \
+ FLAGS, LangOpts, *this);
+#define ALIAS(NAME, TOK, FLAGS) \
+ AddKeyword(NAME, strlen(NAME), tok::kw_ ## TOK, \
+ FLAGS, LangOpts, *this);
+#define CXX_KEYWORD_OPERATOR(NAME, ALIAS) \
+ if (LangOpts.CXXOperatorNames) \
+ AddCXXOperatorKeyword(#NAME, strlen(#NAME), tok::ALIAS, *this);
+#define OBJC1_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC1) \
+ AddObjCKeyword(tok::objc_##NAME, #NAME, strlen(#NAME), *this);
+#define OBJC2_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC2) \
+ AddObjCKeyword(tok::objc_##NAME, #NAME, strlen(#NAME), *this);
+#include "clang/Basic/TokenKinds.def"
+}
+
+tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
+ // We use a perfect hash function here involving the length of the keyword,
+ // and its first and third characters. For preprocessor IDs there are no
+ // collisions (if there were, the switch below would complain about duplicate
+ // case values). Note that this depends on 'if' being null terminated.
+
+#define HASH(LEN, FIRST, THIRD) \
+ (LEN << 5) + (((FIRST-'a') + (THIRD-'a')) & 31)
+#define CASE(LEN, FIRST, THIRD, NAME) \
+ case HASH(LEN, FIRST, THIRD): \
+ return memcmp(Name, #NAME, LEN) ? tok::pp_not_keyword : tok::pp_ ## NAME
+
+ unsigned Len = getLength();
+ if (Len < 2) return tok::pp_not_keyword;
+ const char *Name = getName();
+ switch (HASH(Len, Name[0], Name[2])) {
+ default: return tok::pp_not_keyword;
+ CASE( 2, 'i', '\0', if);
+ CASE( 4, 'e', 'i', elif);
+ CASE( 4, 'e', 's', else);
+ CASE( 4, 'l', 'n', line);
+ CASE( 4, 's', 'c', sccs);
+ CASE( 5, 'e', 'd', endif);
+ CASE( 5, 'e', 'r', error);
+ CASE( 5, 'i', 'e', ident);
+ CASE( 5, 'i', 'd', ifdef);
+ CASE( 5, 'u', 'd', undef);
+
+ CASE( 6, 'a', 's', assert);
+ CASE( 6, 'd', 'f', define);
+ CASE( 6, 'i', 'n', ifndef);
+ CASE( 6, 'i', 'p', import);
+ CASE( 6, 'p', 'a', pragma);
+
+ CASE( 7, 'd', 'f', defined);
+ CASE( 7, 'i', 'c', include);
+ CASE( 7, 'w', 'r', warning);
+
+ CASE( 8, 'u', 'a', unassert);
+ CASE(12, 'i', 'c', include_next);
+
+ CASE(16, '_', 'i', __include_macros);
+#undef CASE
+#undef HASH
+ }
+}
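A standalone check of the hash used above, (Len << 5) + (((First - 'a') + (Third - 'a')) & 31): directives of the same length still land on distinct case values because their first and third characters differ (sample directives only; the real switch covers the full set):

#include <cassert>
#include <cstring>

static unsigned ppHash(const char *Name) {
  unsigned Len = std::strlen(Name);
  return (Len << 5) + (((Name[0] - 'a') + (Name[2] - 'a')) & 31);
}

int main() {
  // Same length, different hashes: this is what keeps the switch collision-free.
  assert(ppHash("elif")   != ppHash("else"));
  assert(ppHash("ifdef")  != ppHash("undef"));
  assert(ppHash("define") != ppHash("pragma"));
  return 0;
}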
+
+//===----------------------------------------------------------------------===//
+// Stats Implementation
+//===----------------------------------------------------------------------===//
+
+/// PrintStats - Print statistics about how well the identifier table is doing
+/// at hashing identifiers.
+void IdentifierTable::PrintStats() const {
+ unsigned NumBuckets = HashTable.getNumBuckets();
+ unsigned NumIdentifiers = HashTable.getNumItems();
+ unsigned NumEmptyBuckets = NumBuckets-NumIdentifiers;
+ unsigned AverageIdentifierSize = 0;
+ unsigned MaxIdentifierLength = 0;
+
+ // TODO: Figure out maximum times an identifier had to probe for -stats.
+ for (llvm::StringMap<IdentifierInfo*, llvm::BumpPtrAllocator>::const_iterator
+ I = HashTable.begin(), E = HashTable.end(); I != E; ++I) {
+ unsigned IdLen = I->getKeyLength();
+ AverageIdentifierSize += IdLen;
+ if (MaxIdentifierLength < IdLen)
+ MaxIdentifierLength = IdLen;
+ }
+
+ fprintf(stderr, "\n*** Identifier Table Stats:\n");
+ fprintf(stderr, "# Identifiers: %d\n", NumIdentifiers);
+ fprintf(stderr, "# Empty Buckets: %d\n", NumEmptyBuckets);
+ fprintf(stderr, "Hash density (#identifiers per bucket): %f\n",
+ NumIdentifiers/(double)NumBuckets);
+ fprintf(stderr, "Ave identifier length: %f\n",
+ (AverageIdentifierSize/(double)NumIdentifiers));
+ fprintf(stderr, "Max identifier length: %d\n", MaxIdentifierLength);
+
+ // Compute statistics about the memory allocated for identifiers.
+ HashTable.getAllocator().PrintStats();
+}
+
+//===----------------------------------------------------------------------===//
+// SelectorTable Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned llvm::DenseMapInfo<clang::Selector>::getHashValue(clang::Selector S) {
+ return DenseMapInfo<void*>::getHashValue(S.getAsOpaquePtr());
+}
+
+namespace clang {
+/// MultiKeywordSelector - One of these variable length records is kept for each
+/// selector containing more than one keyword. We use a folding set
+/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
+/// this class is provided strictly through Selector.
+class MultiKeywordSelector
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+ MultiKeywordSelector(unsigned nKeys) {
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+ }
+public:
+ // Constructor for keyword selectors.
+ MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ assert((nKeys > 1) && "not a multi-keyword selector");
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+
+ // Fill in the trailing keyword array.
+ IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this+1);
+ for (unsigned i = 0; i != nKeys; ++i)
+ KeyInfo[i] = IIV[i];
+ }
+
+ // getName - Derive the full selector name and return it.
+ std::string getName() const;
+
+ unsigned getNumArgs() const { return ExtraKindOrNumArgs - NUM_EXTRA_KINDS; }
+
+ typedef IdentifierInfo *const *keyword_iterator;
+ keyword_iterator keyword_begin() const {
+ return reinterpret_cast<keyword_iterator>(this+1);
+ }
+ keyword_iterator keyword_end() const {
+ return keyword_begin()+getNumArgs();
+ }
+ IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
+ assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index");
+ return keyword_begin()[i];
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ keyword_iterator ArgTys, unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i]);
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, keyword_begin(), getNumArgs());
+ }
+};
+} // end namespace clang.
+
+unsigned Selector::getNumArgs() const {
+ unsigned IIF = getIdentifierInfoFlag();
+ if (IIF == ZeroArg)
+ return 0;
+ if (IIF == OneArg)
+ return 1;
+ // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
+ MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ return SI->getNumArgs();
+}
+
+IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
+ if (getIdentifierInfoFlag()) {
+ assert(argIndex == 0 && "illegal keyword index");
+ return getAsIdentifierInfo();
+ }
+ // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
+ MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ return SI->getIdentifierInfoForSlot(argIndex);
+}
+
+std::string MultiKeywordSelector::getName() const {
+ std::string Result;
+ unsigned Length = 0;
+ for (keyword_iterator I = keyword_begin(), E = keyword_end(); I != E; ++I) {
+ if (*I)
+ Length += (*I)->getLength();
+ ++Length; // :
+ }
+
+ Result.reserve(Length);
+
+ for (keyword_iterator I = keyword_begin(), E = keyword_end(); I != E; ++I) {
+ if (*I)
+ Result.insert(Result.end(), (*I)->getName(),
+ (*I)->getName()+(*I)->getLength());
+ Result.push_back(':');
+ }
+
+ return Result;
+}
+
+std::string Selector::getAsString() const {
+ if (InfoPtr == 0)
+ return "<null selector>";
+
+ if (InfoPtr & ArgFlags) {
+ IdentifierInfo *II = getAsIdentifierInfo();
+
+ // If the number of arguments is 0 then II is guaranteed to not be null.
+ if (getNumArgs() == 0)
+ return II->getName();
+
+ std::string Res = II ? II->getName() : "";
+ Res += ":";
+ return Res;
+ }
+
+ // We have a multiple keyword selector (no embedded flags).
+ return reinterpret_cast<MultiKeywordSelector *>(InfoPtr)->getName();
+}
+
+
+namespace {
+ struct SelectorTableImpl {
+ llvm::FoldingSet<MultiKeywordSelector> Table;
+ llvm::BumpPtrAllocator Allocator;
+ };
+} // end anonymous namespace.
+
+static SelectorTableImpl &getSelectorTableImpl(void *P) {
+ return *static_cast<SelectorTableImpl*>(P);
+}
+
+
+Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ if (nKeys < 2)
+ return Selector(IIV[0], nKeys);
+
+ SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
+
+ // Unique selector, to guarantee there is one per name.
+ llvm::FoldingSetNodeID ID;
+ MultiKeywordSelector::Profile(ID, IIV, nKeys);
+
+ void *InsertPos = 0;
+ if (MultiKeywordSelector *SI =
+ SelTabImpl.Table.FindNodeOrInsertPos(ID, InsertPos))
+ return Selector(SI);
+
+ // MultiKeywordSelector objects are not allocated with new because they have a
+ // variable size array (for the keyword identifiers) at the end of them.
+ unsigned Size = sizeof(MultiKeywordSelector) + nKeys*sizeof(IdentifierInfo *);
+ MultiKeywordSelector *SI =
+ (MultiKeywordSelector*)SelTabImpl.Allocator.Allocate(Size,
+ llvm::alignof<MultiKeywordSelector>());
+ new (SI) MultiKeywordSelector(nKeys, IIV);
+ SelTabImpl.Table.InsertNode(SI, InsertPos);
+ return Selector(SI);
+}
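MultiKeywordSelector is a variable-length record: the keyword pointers live directly after the object, and the size passed to the allocator accounts for them before placement-new runs the constructor. A standalone sketch of that trailing-array idiom using plain operator new (the Header type and std::string keys are illustrative; there is no FoldingSet uniquing here):

#include <cassert>
#include <cstddef>
#include <new>
#include <string>

// A pointer-sized member keeps sizeof(Header) a multiple of the slot alignment,
// so the trailing array that starts at (this + 1) is correctly aligned.
struct Header {
  std::size_t NumKeys;
  explicit Header(std::size_t N) : NumKeys(N) {}
  const std::string **slots() {
    return reinterpret_cast<const std::string **>(this + 1);
  }
};

static Header *makeRecord(const std::string *const *Keys, std::size_t N) {
  void *Mem = ::operator new(sizeof(Header) + N * sizeof(const std::string *));
  Header *H = new (Mem) Header(N);   // placement-new the fixed-size part
  for (std::size_t i = 0; i != N; ++i)
    H->slots()[i] = Keys[i];         // then fill the variable-length tail
  return H;
}

int main() {
  std::string A = "insertObject", B = "atIndex";
  const std::string *Keys[] = {&A, &B};
  Header *H = makeRecord(Keys, 2);
  assert(H->NumKeys == 2 && *H->slots()[1] == "atIndex");
  H->~Header();
  ::operator delete(H);
  return 0;
}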
+
+SelectorTable::SelectorTable() {
+ Impl = new SelectorTableImpl();
+}
+
+SelectorTable::~SelectorTable() {
+ delete &getSelectorTableImpl(Impl);
+}
+
diff --git a/lib/Basic/Makefile b/lib/Basic/Makefile
new file mode 100644
index 0000000..3fd6c2c
--- /dev/null
+++ b/lib/Basic/Makefile
@@ -0,0 +1,22 @@
+##===- clang/lib/Basic/Makefile ----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the Basic library for the C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangBasic
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
new file mode 100644
index 0000000..f21ec8b
--- /dev/null
+++ b/lib/Basic/SourceLocation.cpp
@@ -0,0 +1,125 @@
+//==--- SourceLocation.cpp - Compact identifier for Source Files -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines accessor methods for the FullSourceLoc class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceLoc
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceLoc::print(llvm::raw_ostream &OS) const {
+ if (Loc.isValid()) {
+ Loc.print(OS, SM);
+ OS << ": ";
+ }
+ OS << Message << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// SourceLocation
+//===----------------------------------------------------------------------===//
+
+void SourceLocation::print(llvm::raw_ostream &OS, const SourceManager &SM)const{
+ if (!isValid()) {
+ OS << "<invalid loc>";
+ return;
+ }
+
+ if (isFileID()) {
+ PresumedLoc PLoc = SM.getPresumedLoc(*this);
+ // The instantiation and spelling positions are identical for file locs.
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ return;
+ }
+
+ SM.getInstantiationLoc(*this).print(OS, SM);
+
+ OS << " <Spelling=";
+ SM.getSpellingLoc(*this).print(OS, SM);
+ OS << '>';
+}
+
+void SourceLocation::dump(const SourceManager &SM) const {
+ print(llvm::errs(), SM);
+}
+
+//===----------------------------------------------------------------------===//
+// FullSourceLoc
+//===----------------------------------------------------------------------===//
+
+FileID FullSourceLoc::getFileID() const {
+ assert(isValid());
+ return SrcMgr->getFileID(*this);
+}
+
+
+FullSourceLoc FullSourceLoc::getInstantiationLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getInstantiationLoc(*this), *SrcMgr);
+}
+
+FullSourceLoc FullSourceLoc::getSpellingLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getSpellingLoc(*this), *SrcMgr);
+}
+
+unsigned FullSourceLoc::getInstantiationLineNumber() const {
+ assert(isValid());
+ return SrcMgr->getInstantiationLineNumber(*this);
+}
+
+unsigned FullSourceLoc::getInstantiationColumnNumber() const {
+ assert(isValid());
+ return SrcMgr->getInstantiationColumnNumber(*this);
+}
+
+unsigned FullSourceLoc::getSpellingLineNumber() const {
+ assert(isValid());
+ return SrcMgr->getSpellingLineNumber(*this);
+}
+
+unsigned FullSourceLoc::getSpellingColumnNumber() const {
+ assert(isValid());
+ return SrcMgr->getSpellingColumnNumber(*this);
+}
+
+bool FullSourceLoc::isInSystemHeader() const {
+ assert(isValid());
+ return SrcMgr->isInSystemHeader(*this);
+}
+
+const char *FullSourceLoc::getCharacterData() const {
+ assert(isValid());
+ return SrcMgr->getCharacterData(*this);
+}
+
+const llvm::MemoryBuffer* FullSourceLoc::getBuffer() const {
+ assert(isValid());
+ return SrcMgr->getBuffer(SrcMgr->getFileID(*this));
+}
+
+std::pair<const char*, const char*> FullSourceLoc::getBufferData() const {
+ const llvm::MemoryBuffer *Buf = getBuffer();
+ return std::make_pair(Buf->getBufferStart(), Buf->getBufferEnd());
+}
+
+std::pair<FileID, unsigned> FullSourceLoc::getDecomposedLoc() const {
+ return SrcMgr->getDecomposedLoc(*this);
+}
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
new file mode 100644
index 0000000..7d2d0ae
--- /dev/null
+++ b/lib/Basic/SourceManager.cpp
@@ -0,0 +1,943 @@
+//===--- SourceManager.cpp - Track and cache source files -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SourceManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/System/Path.h"
+#include "llvm/Support/Streams.h"
+#include <algorithm>
+#include <iostream>
+using namespace clang;
+using namespace SrcMgr;
+using llvm::MemoryBuffer;
+
+//===----------------------------------------------------------------------===//
+// SourceManager Helper Classes
+//===----------------------------------------------------------------------===//
+
+ContentCache::~ContentCache() {
+ delete Buffer;
+}
+
+/// getSizeBytesMapped - Returns the number of bytes actually mapped for
+/// this ContentCache. This can be 0 if the MemBuffer was not actually
+/// instantiated.
+unsigned ContentCache::getSizeBytesMapped() const {
+ return Buffer ? Buffer->getBufferSize() : 0;
+}
+
+/// getSize - Returns the size of the content encapsulated by this ContentCache.
+/// This can be the size of the source file or the size of an arbitrary
+/// scratch buffer. If the ContentCache encapsulates a source file, that
+/// file is not lazily brought in from disk to satisfy this query.
+unsigned ContentCache::getSize() const {
+ return Entry ? Entry->getSize() : Buffer->getBufferSize();
+}
+
+const llvm::MemoryBuffer *ContentCache::getBuffer() const {
+ // Lazily create the Buffer for ContentCaches that wrap files.
+ if (!Buffer && Entry) {
+ // FIXME: Should we support a way to not have to do this check over
+ // and over if we cannot open the file?
+ Buffer = MemoryBuffer::getFile(Entry->getName(), 0, Entry->getSize());
+ }
+ return Buffer;
+}
+
+unsigned LineTableInfo::getLineTableFilenameID(const char *Ptr, unsigned Len) {
+ // Look up the filename in the string table, returning the pre-existing value
+ // if it exists.
+ llvm::StringMapEntry<unsigned> &Entry =
+ FilenameIDs.GetOrCreateValue(Ptr, Ptr+Len, ~0U);
+ if (Entry.getValue() != ~0U)
+ return Entry.getValue();
+
+ // Otherwise, assign this the next available ID.
+ Entry.setValue(FilenamesByID.size());
+ FilenamesByID.push_back(&Entry);
+ return FilenamesByID.size()-1;
+}
+
+/// AddLineNote - Add a line note to the line table that indicates that there
+/// is a #line at the specified FID/Offset location which changes the presumed
+/// location to LineNo/FilenameID.
+void LineTableInfo::AddLineNote(unsigned FID, unsigned Offset,
+ unsigned LineNo, int FilenameID) {
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User;
+ unsigned IncludeOffset = 0;
+
+ if (!Entries.empty()) {
+ // If this is a '#line 4' after '#line 42 "foo.h"', make sure to remember
+ // that we are still in "foo.h".
+ if (FilenameID == -1)
+ FilenameID = Entries.back().FilenameID;
+
+ // If we are after a line marker that switched us to system header mode, or
+ // that set #include information, preserve it.
+ Kind = Entries.back().FileKind;
+ IncludeOffset = Entries.back().IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, Kind,
+ IncludeOffset));
+}
+
+/// AddLineNote - This is the same as the previous version of AddLineNote, but
+/// is used for GNU line markers. If EntryExit is 0, this doesn't change the
+/// presumed #include stack; if it is 1, this is a file entry; if it is 2, this
+/// is a file exit. FileKind specifies whether this is a system header or
+/// extern C system header.
+void LineTableInfo::AddLineNote(unsigned FID, unsigned Offset,
+ unsigned LineNo, int FilenameID,
+ unsigned EntryExit,
+ SrcMgr::CharacteristicKind FileKind) {
+ assert(FilenameID != -1 && "Unspecified filename should use other accessor");
+
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ unsigned IncludeOffset = 0;
+ if (EntryExit == 0) { // No #include stack change.
+ IncludeOffset = Entries.empty() ? 0 : Entries.back().IncludeOffset;
+ } else if (EntryExit == 1) {
+ IncludeOffset = Offset-1;
+ } else if (EntryExit == 2) {
+ assert(!Entries.empty() && Entries.back().IncludeOffset &&
+ "PPDirectives should have caught case when popping empty include stack");
+
+ // Pop one level: our include offset is the include offset of the entry
+ // that encloses the last entry's include location.
+ IncludeOffset = 0;
+ if (const LineEntry *PrevEntry =
+ FindNearestLineEntry(FID, Entries.back().IncludeOffset))
+ IncludeOffset = PrevEntry->IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, FileKind,
+ IncludeOffset));
+}
+
+
+/// FindNearestLineEntry - Find the line entry in FID nearest to, and at or
+/// before, Offset. If there is no line entry before Offset in FID, return null.
+const LineEntry *LineTableInfo::FindNearestLineEntry(unsigned FID,
+ unsigned Offset) {
+ const std::vector<LineEntry> &Entries = LineEntries[FID];
+ assert(!Entries.empty() && "No #line entries for this FID after all!");
+
+ // It is very common for the query to be after the last #line; check this
+ // first.
+ if (Entries.back().FileOffset <= Offset)
+ return &Entries.back();
+
+ // Do a binary search to find the maximal element that is still before Offset.
+ std::vector<LineEntry>::const_iterator I =
+ std::upper_bound(Entries.begin(), Entries.end(), Offset);
+ if (I == Entries.begin()) return 0;
+ return &*--I;
+}
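A standalone sketch of the lookup above: entries stay sorted by file offset, so the nearest #line at or before a query offset is std::upper_bound minus one (the real code relies on an ordering between LineEntry and a bare offset presumably declared elsewhere in clang; the sketch passes an explicit comparator instead):

#include <algorithm>
#include <cassert>
#include <vector>

struct Entry { unsigned FileOffset; unsigned LineNo; };

static bool beforeOffset(unsigned Offset, const Entry &E) {
  return Offset < E.FileOffset;
}

// Return the entry with the largest FileOffset <= Offset, or 0 if none.
static const Entry *findNearest(const std::vector<Entry> &Entries,
                                unsigned Offset) {
  if (!Entries.empty() && Entries.back().FileOffset <= Offset)
    return &Entries.back();                 // common case: after the last #line
  std::vector<Entry>::const_iterator I =
      std::upper_bound(Entries.begin(), Entries.end(), Offset, beforeOffset);
  if (I == Entries.begin())
    return 0;                               // query precedes every entry
  return &*--I;
}

int main() {
  std::vector<Entry> E;
  Entry A = {100, 10}, B = {400, 50};
  E.push_back(A); E.push_back(B);
  assert(findNearest(E, 50) == 0);
  assert(findNearest(E, 250)->LineNo == 10);
  assert(findNearest(E, 400)->LineNo == 50);
  return 0;
}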
+
+/// \brief Add a new line entry that has already been encoded into
+/// the internal representation of the line table.
+void LineTableInfo::AddEntry(unsigned FID,
+ const std::vector<LineEntry> &Entries) {
+ LineEntries[FID] = Entries;
+}
+
+/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
+///
+unsigned SourceManager::getLineTableFilenameID(const char *Ptr, unsigned Len) {
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ return LineTable->getLineTableFilenameID(Ptr, Len);
+}
+
+
+/// AddLineNote - Add a line note to the line table for the FileID and offset
+/// specified by Loc. If FilenameID is -1, it is considered to be
+/// unspecified.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID) {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+
+ const SrcMgr::FileInfo &FileInfo = getSLocEntry(LocInfo.first).getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID);
+}
+
+/// AddLineNote - Add a GNU line marker to the line table.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID, bool IsFileEntry,
+ bool IsFileExit, bool IsSystemHeader,
+ bool IsExternCHeader) {
+ // If there is no filename and no flags, this is treated just like a #line,
+ // which does not change the flags of the previous line marker.
+ if (FilenameID == -1) {
+ assert(!IsFileEntry && !IsFileExit && !IsSystemHeader && !IsExternCHeader &&
+ "Can't set flags without setting the filename!");
+ return AddLineNote(Loc, LineNo, FilenameID);
+ }
+
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+ const SrcMgr::FileInfo &FileInfo = getSLocEntry(LocInfo.first).getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+
+ SrcMgr::CharacteristicKind FileKind;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+ else
+ FileKind = SrcMgr::C_User;
+
+ unsigned EntryExit = 0;
+ if (IsFileEntry)
+ EntryExit = 1;
+ else if (IsFileExit)
+ EntryExit = 2;
+
+ LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID,
+ EntryExit, FileKind);
+}
+
+LineTableInfo &SourceManager::getLineTable() {
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ return *LineTable;
+}
+
+//===----------------------------------------------------------------------===//
+// Private 'Create' methods.
+//===----------------------------------------------------------------------===//
+
+SourceManager::~SourceManager() {
+ delete LineTable;
+
+ // Delete FileEntry objects corresponding to content caches. Since the actual
+ // content cache objects are bump pointer allocated, we just have to run the
+ // dtors, but we call the deallocate method for completeness.
+ for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i) {
+ MemBufferInfos[i]->~ContentCache();
+ ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
+ }
+ for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
+ I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
+ I->second->~ContentCache();
+ ContentCacheAlloc.Deallocate(I->second);
+ }
+}
+
+void SourceManager::clearIDTables() {
+ MainFileID = FileID();
+ SLocEntryTable.clear();
+ LastLineNoFileIDQuery = FileID();
+ LastLineNoContentCache = 0;
+ LastFileIDLookup = FileID();
+
+ if (LineTable)
+ LineTable->clear();
+
+ // Use up FileID #0 as an invalid instantiation.
+ NextOffset = 0;
+ createInstantiationLoc(SourceLocation(),SourceLocation(),SourceLocation(), 1);
+}
+
+/// getOrCreateContentCache - Create or return a cached ContentCache for the
+/// specified file.
+const ContentCache *
+SourceManager::getOrCreateContentCache(const FileEntry *FileEnt) {
+ assert(FileEnt && "Didn't specify a file entry to use?");
+
+ // Do we already have information about this file?
+ ContentCache *&Entry = FileInfos[FileEnt];
+ if (Entry) return Entry;
+
+ // Nope, create a new Cache entry. Make sure it is at least 8-byte aligned
+ // so that FileInfo can use the low 3 bits of the pointer for its own
+ // nefarious purposes.
+ unsigned EntryAlign = llvm::AlignOf<ContentCache>::Alignment;
+ EntryAlign = std::max(8U, EntryAlign);
+ Entry = ContentCacheAlloc.Allocate<ContentCache>(1, EntryAlign);
+ new (Entry) ContentCache(FileEnt);
+ return Entry;
+}
+
+
+/// createMemBufferContentCache - Create a new ContentCache for the specified
+/// memory buffer. This does no caching.
+const ContentCache*
+SourceManager::createMemBufferContentCache(const MemoryBuffer *Buffer) {
+ // Add a new ContentCache to the MemBufferInfos list and return it. Make sure
+ // it is at least 8-byte aligned so that FileInfo can use the low 3 bits of
+ // the pointer for its own nefarious purposes.
+ unsigned EntryAlign = llvm::AlignOf<ContentCache>::Alignment;
+ EntryAlign = std::max(8U, EntryAlign);
+ ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>(1, EntryAlign);
+ new (Entry) ContentCache();
+ MemBufferInfos.push_back(Entry);
+ Entry->setBuffer(Buffer);
+ return Entry;
+}
+
+void SourceManager::PreallocateSLocEntries(ExternalSLocEntrySource *Source,
+ unsigned NumSLocEntries,
+ unsigned NextOffset) {
+ ExternalSLocEntries = Source;
+ this->NextOffset = NextOffset;
+ SLocEntryLoaded.resize(NumSLocEntries + 1);
+ SLocEntryLoaded[0] = true;
+ SLocEntryTable.resize(SLocEntryTable.size() + NumSLocEntries);
+}
+
+void SourceManager::ClearPreallocatedSLocEntries() {
+ unsigned I = 0;
+ for (unsigned N = SLocEntryLoaded.size(); I != N; ++I)
+ if (!SLocEntryLoaded[I])
+ break;
+
+ // We've already loaded all preallocated source location entries.
+ if (I == SLocEntryLoaded.size())
+ return;
+
+ // Remove everything from location I onward.
+ SLocEntryTable.resize(I);
+ SLocEntryLoaded.clear();
+ ExternalSLocEntries = 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods to create new FileID's and instantiations.
+//===----------------------------------------------------------------------===//
+
+/// createFileID - Create a new fileID for the specified ContentCache and
+/// include position. This works regardless of whether the ContentCache
+/// corresponds to a file or some other input source.
+FileID SourceManager::createFileID(const ContentCache *File,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ unsigned PreallocatedID,
+ unsigned Offset) {
+ SLocEntry NewEntry = SLocEntry::get(NextOffset,
+ FileInfo::get(IncludePos, File,
+ FileCharacter));
+ if (PreallocatedID) {
+ // If we're filling in a preallocated ID, just load in the file
+ // entry and return.
+ assert(PreallocatedID < SLocEntryLoaded.size() &&
+ "Preallocated ID out-of-range");
+ assert(!SLocEntryLoaded[PreallocatedID] &&
+ "Source location entry already loaded");
+ assert(Offset && "Preallocated source location cannot have zero offset");
+ SLocEntryTable[PreallocatedID]
+ = SLocEntry::get(Offset, FileInfo::get(IncludePos, File, FileCharacter));
+ SLocEntryLoaded[PreallocatedID] = true;
+ return LastFileIDLookup = FileID::get(PreallocatedID);
+ }
+
+ SLocEntryTable.push_back(SLocEntry::get(NextOffset,
+ FileInfo::get(IncludePos, File,
+ FileCharacter)));
+ unsigned FileSize = File->getSize();
+ assert(NextOffset+FileSize+1 > NextOffset && "Ran out of source locations!");
+ NextOffset += FileSize+1;
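+ // Each FileID thus owns FileSize+1 contiguous offsets: e.g. a 100-byte file
+ // whose entry begins at offset 200 covers offsets 200..300, and the next
+ // FileID begins at offset 301.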
+
+ // Set LastFileIDLookup to the newly created file. The next getFileID call is
+ // almost guaranteed to be from that file.
+ return LastFileIDLookup = FileID::get(SLocEntryTable.size()-1);
+}
+
+/// createInstantiationLoc - Return a new SourceLocation that encodes the fact
+/// that a token from SpellingLoc should actually be referenced from
+/// InstantiationLoc.
+SourceLocation SourceManager::createInstantiationLoc(SourceLocation SpellingLoc,
+ SourceLocation ILocStart,
+ SourceLocation ILocEnd,
+ unsigned TokLength,
+ unsigned PreallocatedID,
+ unsigned Offset) {
+ InstantiationInfo II = InstantiationInfo::get(ILocStart,ILocEnd, SpellingLoc);
+ if (PreallocatedID) {
+ // If we're filling in a preallocated ID, just load in the
+ // instantiation entry and return.
+ assert(PreallocatedID < SLocEntryLoaded.size() &&
+ "Preallocated ID out-of-range");
+ assert(!SLocEntryLoaded[PreallocatedID] &&
+ "Source location entry already loaded");
+ assert(Offset && "Preallocated source location cannot have zero offset");
+ SLocEntryTable[PreallocatedID] = SLocEntry::get(Offset, II);
+ SLocEntryLoaded[PreallocatedID] = true;
+ return SourceLocation::getMacroLoc(Offset);
+ }
+ SLocEntryTable.push_back(SLocEntry::get(NextOffset, II));
+ assert(NextOffset+TokLength+1 > NextOffset && "Ran out of source locations!");
+ NextOffset += TokLength+1;
+ return SourceLocation::getMacroLoc(NextOffset-(TokLength+1));
+}
+
+/// getBufferData - Return a pointer to the start and end of the source buffer
+/// data for the specified FileID.
+std::pair<const char*, const char*>
+SourceManager::getBufferData(FileID FID) const {
+ const llvm::MemoryBuffer *Buf = getBuffer(FID);
+ return std::make_pair(Buf->getBufferStart(), Buf->getBufferEnd());
+}
+
+
+//===----------------------------------------------------------------------===//
+// SourceLocation manipulation methods.
+//===----------------------------------------------------------------------===//
+
+/// getFileIDSlow - Return the FileID for a SourceLocation. This is a very hot
+/// method that is used for all SourceManager queries that start with a
+/// SourceLocation object. It is responsible for finding the entry in
+/// SLocEntryTable which contains the specified location.
+///
+FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
+ assert(SLocOffset && "Invalid FileID");
+
+ // After the first and second level caches, I see two common sorts of
+ // behavior: 1) a lot of searched FileID's are "near" the cached file location
+ // or are "near" the cached instantiation location. 2) others are just
+ // completely random and may be a very long way away.
+ //
+ // To handle this, we do a linear search for up to 8 steps to catch #1 quickly
+ // then we fall back to a less cache efficient, but more scalable, binary
+ // search to find the location.
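+ //
+ // For example, a location in the most recently #included file is usually
+ // found within a probe or two of LastFileIDLookup, while a location in a
+ // FileID created much earlier falls through to the binary search below.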
+
+ // See if this is near the file point - worst case we start scanning from the
+ // most newly created FileID.
+ std::vector<SrcMgr::SLocEntry>::const_iterator I;
+
+ if (SLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset) {
+ // Neither loc prunes our search.
+ I = SLocEntryTable.end();
+ } else {
+ // Perhaps it is near the file point.
+ I = SLocEntryTable.begin()+LastFileIDLookup.ID;
+ }
+
+ // Find the FileID that contains this. "I" is an iterator that points to a
+ // FileID whose offset is known to be larger than SLocOffset.
+ unsigned NumProbes = 0;
+ while (1) {
+ --I;
+ if (ExternalSLocEntries)
+ getSLocEntry(FileID::get(I - SLocEntryTable.begin()));
+ if (I->getOffset() <= SLocOffset) {
+#if 0
+ printf("lin %d -> %d [%s] %d %d\n", SLocOffset,
+ I-SLocEntryTable.begin(),
+ I->isInstantiation() ? "inst" : "file",
+ LastFileIDLookup.ID, int(SLocEntryTable.end()-I));
+#endif
+ FileID Res = FileID::get(I-SLocEntryTable.begin());
+
+ // If this isn't an instantiation, remember it. We have good locality
+ // across FileID lookups.
+ if (!I->isInstantiation())
+ LastFileIDLookup = Res;
+ NumLinearScans += NumProbes+1;
+ return Res;
+ }
+ if (++NumProbes == 8)
+ break;
+ }
+
+ // Convert "I" back into an index. We know that it is an entry whose index is
+ // larger than the offset we are looking for.
+ unsigned GreaterIndex = I-SLocEntryTable.begin();
+ // LessIndex - This is the lower bound of the range that we're searching.
+ // We know that the offset corresponding to the FileID is is less than
+ // SLocOffset.
+ unsigned LessIndex = 0;
+ NumProbes = 0;
+ while (1) {
+ unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
+ unsigned MidOffset = getSLocEntry(FileID::get(MiddleIndex)).getOffset();
+
+ ++NumProbes;
+
+ // If the offset of the midpoint is too large, chop the high side of the
+ // range to the midpoint.
+ if (MidOffset > SLocOffset) {
+ GreaterIndex = MiddleIndex;
+ continue;
+ }
+
+ // If the middle index contains the value, succeed and return.
+ if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
+#if 0
+ printf("bin %d -> %d [%s] %d %d\n", SLocOffset,
+ I-SLocEntryTable.begin(),
+ I->isInstantiation() ? "inst" : "file",
+ LastFileIDLookup.ID, int(SLocEntryTable.end()-I));
+#endif
+ FileID Res = FileID::get(MiddleIndex);
+
+ // If this isn't an instantiation, remember it. We have good locality
+ // across FileID lookups.
+ if (!getSLocEntry(Res).isInstantiation())
+ LastFileIDLookup = Res;
+ NumBinaryProbes += NumProbes;
+ return Res;
+ }
+
+ // Otherwise, move the low-side up to the middle index.
+ LessIndex = MiddleIndex;
+ }
+}
+
+SourceLocation SourceManager::
+getInstantiationLocSlowCase(SourceLocation Loc) const {
+ do {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getInstantiation()
+ .getInstantiationLocStart();
+ Loc = Loc.getFileLocWithOffset(LocInfo.second);
+ } while (!Loc.isFileID());
+
+ return Loc;
+}
+
+SourceLocation SourceManager::getSpellingLocSlowCase(SourceLocation Loc) const {
+ do {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getInstantiation().getSpellingLoc();
+ Loc = Loc.getFileLocWithOffset(LocInfo.second);
+ } while (!Loc.isFileID());
+ return Loc;
+}
+
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedInstantiationLocSlowCase(const SrcMgr::SLocEntry *E,
+ unsigned Offset) const {
+ // If this is an instantiation record, walk through all the instantiation
+ // points.
+ FileID FID;
+ SourceLocation Loc;
+ do {
+ Loc = E->getInstantiation().getInstantiationLocStart();
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset += Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
+ unsigned Offset) const {
+ // If this is an instantiation record, walk through all the instantiation
+ // points.
+ FileID FID;
+ SourceLocation Loc;
+ do {
+ Loc = E->getInstantiation().getSpellingLoc();
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset += Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+/// getImmediateSpellingLoc - Given a SourceLocation object, return the
+/// spelling location referenced by the ID. This is the first level down
+/// towards the place where the characters that make up the lexed token can be
+/// found. This should not generally be used by clients.
+SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
+ if (Loc.isFileID()) return Loc;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getInstantiation().getSpellingLoc();
+ return Loc.getFileLocWithOffset(LocInfo.second);
+}
+
+
+/// getImmediateInstantiationRange - Loc is required to be an instantiation
+/// location. Return the start/end of the instantiation information.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getImmediateInstantiationRange(SourceLocation Loc) const {
+ assert(Loc.isMacroID() && "Not an instantiation loc!");
+ const InstantiationInfo &II = getSLocEntry(getFileID(Loc)).getInstantiation();
+ return II.getInstantiationLocRange();
+}
+
+/// getInstantiationRange - Given a SourceLocation object, return the
+/// range of tokens covered by the instantiation in the ultimate file.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getInstantiationRange(SourceLocation Loc) const {
+ if (Loc.isFileID()) return std::make_pair(Loc, Loc);
+
+ std::pair<SourceLocation,SourceLocation> Res =
+ getImmediateInstantiationRange(Loc);
+
+ // Fully resolve the start and end locations to their ultimate instantiation
+ // points.
+ while (!Res.first.isFileID())
+ Res.first = getImmediateInstantiationRange(Res.first).first;
+ while (!Res.second.isFileID())
+ Res.second = getImmediateInstantiationRange(Res.second).second;
+ return Res;
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Queries about the code at a SourceLocation.
+//===----------------------------------------------------------------------===//
+
+/// getCharacterData - Return a pointer to the start of the specified location
+/// in the appropriate MemoryBuffer.
+const char *SourceManager::getCharacterData(SourceLocation SL) const {
+ // Note that this is a hot function in the getSpelling() path, which is
+ // heavily used by -E mode.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(SL);
+
+ // Note that calling 'getBuffer()' may lazily page in a source file.
+ return getSLocEntry(LocInfo.first).getFile().getContentCache()
+ ->getBuffer()->getBufferStart() + LocInfo.second;
+}
+
+
+/// getColumnNumber - Return the column # for the specified file position.
+/// This is significantly cheaper to compute than the line number.
+unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos) const {
+ const char *Buf = getBuffer(FID)->getBufferStart();
+
+ unsigned LineStart = FilePos;
+ while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
+ --LineStart;
+ return FilePos-LineStart+1;
+}
+
+unsigned SourceManager::getSpellingColumnNumber(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second);
+}
+
+unsigned SourceManager::getInstantiationColumnNumber(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second);
+}
+
+
+
+static void ComputeLineNumbers(ContentCache* FI,
+ llvm::BumpPtrAllocator &Alloc) DISABLE_INLINE;
+static void ComputeLineNumbers(ContentCache* FI, llvm::BumpPtrAllocator &Alloc){
+ // Note that calling 'getBuffer()' may lazily page in the file.
+ const MemoryBuffer *Buffer = FI->getBuffer();
+
+ // Find the file offsets of all of the *physical* source lines. This does
+ // not look at trigraphs, escaped newlines, or anything else tricky.
+ std::vector<unsigned> LineOffsets;
+
+ // Line #1 starts at char 0.
+ LineOffsets.push_back(0);
+
+ const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
+ const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
+ unsigned Offs = 0;
+ while (1) {
+ // Skip over the contents of the line.
+ // TODO: Vectorize this? This is very performance sensitive for programs
+ // with lots of diagnostics and in -E mode.
+ const unsigned char *NextBuf = (const unsigned char *)Buf;
+ while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
+ ++NextBuf;
+ Offs += NextBuf-Buf;
+ Buf = NextBuf;
+
+ if (Buf[0] == '\n' || Buf[0] == '\r') {
+ // If this is \n\r or \r\n, skip both characters.
+ if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1])
+ ++Offs, ++Buf;
+ ++Offs, ++Buf;
+ LineOffsets.push_back(Offs);
+ } else {
+ // Otherwise, this is a null. If end of file, exit.
+ if (Buf == End) break;
+ // Otherwise, skip the null.
+ ++Offs, ++Buf;
+ }
+ }
+
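+ // For example, a buffer containing "a\nb\r\nc" yields LineOffsets {0, 2, 5},
+ // i.e. NumLines == 3.
+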
+ // Copy the offsets into the FileInfo structure.
+ FI->NumLines = LineOffsets.size();
+ FI->SourceLineCache = Alloc.Allocate<unsigned>(LineOffsets.size());
+ std::copy(LineOffsets.begin(), LineOffsets.end(), FI->SourceLineCache);
+}
+
+/// getLineNumber - Given a SourceLocation, return the spelling line number
+/// for the position indicated. This requires building and caching a table of
+/// line offsets for the MemoryBuffer, so this is not cheap: use only when
+/// about to emit a diagnostic.
+unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos) const {
+ ContentCache *Content;
+ if (LastLineNoFileIDQuery == FID)
+ Content = LastLineNoContentCache;
+ else
+ Content = const_cast<ContentCache*>(getSLocEntry(FID)
+ .getFile().getContentCache());
+
+ // If this is the first use of line information for this buffer, compute the
+ // SourceLineCache for it on demand.
+ if (Content->SourceLineCache == 0)
+ ComputeLineNumbers(Content, ContentCacheAlloc);
+
+ // Okay, we know we have a line number table. Do a binary search to find the
+ // line number that this character position lands on.
+ unsigned *SourceLineCache = Content->SourceLineCache;
+ unsigned *SourceLineCacheStart = SourceLineCache;
+ unsigned *SourceLineCacheEnd = SourceLineCache + Content->NumLines;
+
+ unsigned QueriedFilePos = FilePos+1;
+
+ // FIXME: I would like to be convinced that this code is worth being as
+ // complicated as it is, binary search isn't that slow.
+ //
+ // If it is worth being optimized, then in my opinion it could be more
+ // performant, simpler, and more obviously correct by just "galloping" outward
+ // from the queried file position. In fact, this could be incorporated into a
+ // generic algorithm such as lower_bound_with_hint.
+ //
+ // If someone gives me a test case where this matters, I will do it! - DWD
+
+ // If the previous query was to the same file, we know both the file pos from
+ // that query and the line number returned. This allows us to narrow the
+ // search space from the entire file to something near the match.
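+ // (e.g. after a query that resolved to line 100, a follow-up query a little
+ // later in the file only needs to search a handful of entries past line 100.)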
+ if (LastLineNoFileIDQuery == FID) {
+ if (QueriedFilePos >= LastLineNoFilePos) {
+ // FIXME: Potential overflow?
+ SourceLineCache = SourceLineCache+LastLineNoResult-1;
+
+ // The query is likely to be nearby the previous one. Here we check to
+ // see if it is within 5, 10 or 20 lines. It can be far away in cases
+ // where big comment blocks and vertical whitespace eat up lines but
+ // contribute no tokens.
+ if (SourceLineCache+5 < SourceLineCacheEnd) {
+ if (SourceLineCache[5] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+5;
+ else if (SourceLineCache+10 < SourceLineCacheEnd) {
+ if (SourceLineCache[10] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+10;
+ else if (SourceLineCache+20 < SourceLineCacheEnd) {
+ if (SourceLineCache[20] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+20;
+ }
+ }
+ }
+ } else {
+ if (LastLineNoResult < Content->NumLines)
+ SourceLineCacheEnd = SourceLineCache+LastLineNoResult+1;
+ }
+ }
+
+ // If the spread is large, do a "radix" test as our initial guess, based on
+ // the assumption that lines average to approximately the same length.
+ // NOTE: This is currently disabled, as it does not appear to be profitable in
+ // initial measurements.
+ if (0 && SourceLineCacheEnd-SourceLineCache > 20) {
+ unsigned FileLen = Content->SourceLineCache[Content->NumLines-1];
+
+ // Take a stab at guessing where it is.
+ unsigned ApproxPos = Content->NumLines*QueriedFilePos / FileLen;
+
+ // Check for -10 and +10 lines.
+ unsigned LowerBound = std::max(int(ApproxPos-10), 0);
+ unsigned UpperBound = std::min(ApproxPos+10, FileLen);
+
+ // If the computed lower bound is less than the query location, move it in.
+ if (SourceLineCache < SourceLineCacheStart+LowerBound &&
+ SourceLineCacheStart[LowerBound] < QueriedFilePos)
+ SourceLineCache = SourceLineCacheStart+LowerBound;
+
+ // If the computed upper bound is greater than the query location, move it.
+ if (SourceLineCacheEnd > SourceLineCacheStart+UpperBound &&
+ SourceLineCacheStart[UpperBound] >= QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCacheStart+UpperBound;
+ }
+
+ unsigned *Pos
+ = std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
+ unsigned LineNo = Pos-SourceLineCacheStart;
+
+ LastLineNoFileIDQuery = FID;
+ LastLineNoContentCache = Content;
+ LastLineNoFilePos = QueriedFilePos;
+ LastLineNoResult = LineNo;
+ return LineNo;
+}
+
+unsigned SourceManager::getInstantiationLineNumber(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+unsigned SourceManager::getSpellingLineNumber(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+
+/// getFileCharacteristic - return the file characteristic of the specified
+/// source location, indicating whether this is a normal file, a system
+/// header, or an "implicit extern C" system header.
+///
+/// This state can be modified with flags on GNU linemarker directives like:
+/// # 4 "foo.h" 3
+/// which changes all source locations in the current file after that to be
+/// considered to be from a system header.
+SrcMgr::CharacteristicKind
+SourceManager::getFileCharacteristic(SourceLocation Loc) const {
+ assert(!Loc.isInvalid() && "Can't get file characteristic of invalid loc!");
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+ const SrcMgr::FileInfo &FI = getSLocEntry(LocInfo.first).getFile();
+
+ // If there are no #line directives in this file, just return the whole-file
+ // state.
+ if (!FI.hasLineDirectives())
+ return FI.getFileCharacteristic();
+
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before the location.
+ const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second);
+
+ // If this is before the first line marker, use the file characteristic.
+ if (!Entry)
+ return FI.getFileCharacteristic();
+
+ return Entry->FileKind;
+}
+
+/// Return the filename or buffer identifier of the buffer the location is in.
+/// Note that this name does not respect #line directives. Use getPresumedLoc
+/// for normal clients.
+const char *SourceManager::getBufferName(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return "<invalid loc>";
+
+ return getBuffer(getFileID(Loc))->getBufferIdentifier();
+}
+
+
+/// getPresumedLoc - This method returns the "presumed" location that a
+/// SourceLocation specifies. A "presumed location" can be modified by #line
+/// or GNU line marker directives. This provides a view on the data that a
+/// user should see in diagnostics, for example.
+///
+/// Note that a presumed location is always given as the instantiation point
+/// of an instantiation location, not as the spelling location.
+PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return PresumedLoc();
+
+ // Presumed locations are always for instantiation points.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedInstantiationLoc(Loc);
+
+ const SrcMgr::FileInfo &FI = getSLocEntry(LocInfo.first).getFile();
+ const SrcMgr::ContentCache *C = FI.getContentCache();
+
+ // To get the source name, first consult the FileEntry (if one exists)
+ // before the MemBuffer as this will avoid unnecessarily paging in the
+ // MemBuffer.
+ const char *Filename =
+ C->Entry ? C->Entry->getName() : C->getBuffer()->getBufferIdentifier();
+ unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second);
+ unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second);
+ SourceLocation IncludeLoc = FI.getIncludeLoc();
+
+ // If we have #line directives in this file, update and overwrite the physical
+ // location info if appropriate.
+ if (FI.hasLineDirectives()) {
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before this. If so, get it.
+ if (const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second)) {
+ // If the LineEntry indicates a filename, use it.
+ if (Entry->FilenameID != -1)
+ Filename = LineTable->getFilename(Entry->FilenameID);
+
+ // Use the line number specified by the LineEntry. The query point may be
+ // multiple lines down from the line marker, so add the difference in
+ // physical line numbers between the query point and the line marker to the
+ // total.
+ unsigned MarkerLineNo = getLineNumber(LocInfo.first, Entry->FileOffset);
+ LineNo = Entry->LineNo + (LineNo-MarkerLineNo-1);
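+ // e.g. a "#line 100" directive on physical line 10 makes physical line 11
+ // report as line 100, physical line 15 as line 104, and so on.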
+
+ // Note that column numbers are not molested by line markers.
+
+ // Handle virtual #include manipulation.
+ if (Entry->IncludeOffset) {
+ IncludeLoc = getLocForStartOfFile(LocInfo.first);
+ IncludeLoc = IncludeLoc.getFileLocWithOffset(Entry->IncludeOffset);
+ }
+ }
+ }
+
+ return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
+}
+
+//===----------------------------------------------------------------------===//
+// Other miscellaneous methods.
+//===----------------------------------------------------------------------===//
+
+
+/// PrintStats - Print statistics to stderr.
+///
+void SourceManager::PrintStats() const {
+ llvm::cerr << "\n*** Source Manager Stats:\n";
+ llvm::cerr << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
+ << " mem buffers mapped.\n";
+ llvm::cerr << SLocEntryTable.size() << " SLocEntry's allocated, "
+ << NextOffset << "B of Sloc address space used.\n";
+
+ unsigned NumLineNumsComputed = 0;
+ unsigned NumFileBytesMapped = 0;
+ for (fileinfo_iterator I = fileinfo_begin(), E = fileinfo_end(); I != E; ++I){
+ NumLineNumsComputed += I->second->SourceLineCache != 0;
+ NumFileBytesMapped += I->second->getSizeBytesMapped();
+ }
+
+ llvm::cerr << NumFileBytesMapped << " bytes of files mapped, "
+ << NumLineNumsComputed << " files with line #'s computed.\n";
+ llvm::cerr << "FileID scans: " << NumLinearScans << " linear, "
+ << NumBinaryProbes << " binary.\n";
+}
+
+ExternalSLocEntrySource::~ExternalSLocEntrySource() { }
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
new file mode 100644
index 0000000..1e8ca2b
--- /dev/null
+++ b/lib/Basic/TargetInfo.cpp
@@ -0,0 +1,295 @@
+//===--- TargetInfo.cpp - Information about Target machine ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetInfo and TargetInfoImpl interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cstdlib>
+using namespace clang;
+
+// TargetInfo Constructor.
+TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
+ // Set defaults. Defaults are set for a 32-bit RISC platform,
+ // like PPC or SPARC.
+ // These should be overridden by concrete targets as needed.
+ CharIsSigned = true;
+ TLSSupported = true;
+ PointerWidth = PointerAlign = 32;
+ WCharWidth = WCharAlign = 32;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 32;
+ LongLongWidth = LongLongAlign = 64;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 64;
+ DoubleAlign = 64;
+ LongDoubleWidth = 64;
+ LongDoubleAlign = 64;
+ IntMaxTWidth = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntMaxType = SignedLongLong;
+ UIntMaxType = UnsignedLongLong;
+ IntPtrType = SignedLong;
+ WCharType = SignedInt;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64";
+ UserLabelPrefix = "_";
+}
+
+// Out of line virtual dtor for TargetInfo.
+TargetInfo::~TargetInfo() {}
+
+/// getTypeName - Return the user string for the specified integer type enum.
+/// For example, SignedShort -> "short".
+const char *TargetInfo::getTypeName(IntType T) {
+ switch (T) {
+ default: assert(0 && "not an integer!");
+ case SignedShort: return "short";
+ case UnsignedShort: return "unsigned short";
+ case SignedInt: return "int";
+ case UnsignedInt: return "unsigned int";
+ case SignedLong: return "long int";
+ case UnsignedLong: return "long unsigned int";
+ case SignedLongLong: return "long long int";
+ case UnsignedLongLong: return "long long unsigned int";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+
+
+static void removeGCCRegisterPrefix(const char *&Name) {
+ if (Name[0] == '%' || Name[0] == '#')
+ Name++;
+}
+
+/// isValidGCCRegisterName - Returns whether the passed in string
+/// is a valid register name according to GCC. This is used by Sema for
+/// inline asm statements.
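+/// For example, on x86 "%eax" is accepted: the '%' prefix is stripped and
+/// "eax" is listed as an alias of the canonical register name "ax".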
+bool TargetInfo::isValidGCCRegisterName(const char *Name) const {
+ const char * const *Names;
+ unsigned NumNames;
+
+ // Get rid of any register prefix.
+ removeGCCRegisterPrefix(Name);
+
+
+ if (strcmp(Name, "memory") == 0 ||
+ strcmp(Name, "cc") == 0)
+ return true;
+
+ getGCCRegNames(Names, NumNames);
+
+ // If we have a number it maps to an entry in the register name array.
+ if (isdigit(Name[0])) {
+ char *End;
+ int n = (int)strtol(Name, &End, 0);
+ if (*End == 0)
+ return n >= 0 && (unsigned)n < NumNames;
+ }
+
+ // Check register names.
+ for (unsigned i = 0; i < NumNames; i++) {
+ if (strcmp(Name, Names[i]) == 0)
+ return true;
+ }
+
+ // Now check aliases.
+ const GCCRegAlias *Aliases;
+ unsigned NumAliases;
+
+ getGCCRegAliases(Aliases, NumAliases);
+ for (unsigned i = 0; i < NumAliases; i++) {
+ for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
+ if (!Aliases[i].Aliases[j])
+ break;
+ if (strcmp(Aliases[i].Aliases[j], Name) == 0)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const char *TargetInfo::getNormalizedGCCRegisterName(const char *Name) const {
+ assert(isValidGCCRegisterName(Name) && "Invalid register passed in");
+
+ removeGCCRegisterPrefix(Name);
+
+ const char * const *Names;
+ unsigned NumNames;
+
+ getGCCRegNames(Names, NumNames);
+
+ // First, check if we have a number.
+ if (isdigit(Name[0])) {
+ char *End;
+ int n = (int)strtol(Name, &End, 0);
+ if (*End == 0) {
+ assert(n >= 0 && (unsigned)n < NumNames &&
+ "Out of bounds register number!");
+ return Names[n];
+ }
+ }
+
+ // Now check aliases.
+ const GCCRegAlias *Aliases;
+ unsigned NumAliases;
+
+ getGCCRegAliases(Aliases, NumAliases);
+ for (unsigned i = 0; i < NumAliases; i++) {
+ for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
+ if (!Aliases[i].Aliases[j])
+ break;
+ if (strcmp(Aliases[i].Aliases[j], Name) == 0)
+ return Aliases[i].Register;
+ }
+ }
+
+ return Name;
+}
+
+bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
+ const char *Name = Info.getConstraintStr().c_str();
+ // An output constraint must start with '=' or '+'
+ if (*Name != '=' && *Name != '+')
+ return false;
+
+ if (*Name == '+')
+ Info.setIsReadWrite();
+
+ Name++;
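+
+ // e.g. "=r" marks a register output and "+m" marks a read-write memory
+ // operand; the loop below records those properties character by character.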
+ while (*Name) {
+ switch (*Name) {
+ default:
+ if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: We temporarily return false
+ // so we can add more constraints as we hit it.
+ // Eventually, an unknown constraint should just be treated as 'g'.
+ return false;
+ }
+ case '&': // early clobber.
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ }
+
+ Name++;
+ }
+
+ return true;
+}
+
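+/// resolveSymbolicName - Scan a symbolic operand name of the form "[foo]" and,
+/// if it matches the name of one of the first NumOutputs output constraints,
+/// return true and set Index to that operand. For example, an asm input
+/// constraint "[result]" resolves to the output operand named "result".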
+bool TargetInfo::resolveSymbolicName(const char *&Name,
+ ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs,
+ unsigned &Index) const {
+ assert(*Name == '[' && "Symbolic name did not start with '['");
+ Name++;
+ const char *Start = Name;
+ while (*Name && *Name != ']')
+ Name++;
+
+ if (!*Name) {
+ // Missing ']'
+ return false;
+ }
+
+ std::string SymbolicName(Start, Name - Start);
+
+ for (Index = 0; Index != NumOutputs; ++Index)
+ if (SymbolicName == OutputConstraints[Index].getName())
+ return true;
+
+ return false;
+}
+
+bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs,
+ ConstraintInfo &Info) const {
+ const char *Name = Info.ConstraintStr.c_str();
+
+ while (*Name) {
+ switch (*Name) {
+ default:
+ // Check if we have a matching constraint
+ if (*Name >= '0' && *Name <= '9') {
+ unsigned i = *Name - '0';
+
+ // Check if matching constraint is out of bounds.
+ if (i >= NumOutputs)
+ return false;
+
+ // The constraint should have the same info as the respective
+ // output constraint.
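+ // (For example, an input constraint "0" is tied to output operand #0.)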
+ Info.setTiedOperand(i, OutputConstraints[i]);
+ } else if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: This error return is in place temporarily so we can
+ // add more constraints as we hit it. Eventually, an unknown
+ // constraint should just be treated as 'g'.
+ return false;
+ }
+ break;
+ case '[': {
+ unsigned Index = 0;
+ if (!resolveSymbolicName(Name, OutputConstraints, NumOutputs, Index))
+ return false;
+
+ break;
+ }
+ case '%': // commutative
+ // FIXME: Fail if % is used with the last operand.
+ break;
+ case 'i': // immediate integer.
+ case 'n': // immediate integer with a known value.
+ break;
+ case 'I': // Various constant constraints with target-specific meanings.
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ }
+
+ Name++;
+ }
+
+ return true;
+}
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
new file mode 100644
index 0000000..4b94bcf
--- /dev/null
+++ b/lib/Basic/Targets.cpp
@@ -0,0 +1,1500 @@
+//===--- Targets.cpp - Implement -arch option and targets -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements construction of a TargetInfo object from a
+// target triple.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: Layering violation
+#include "clang/AST/Builtins.h"
+#include "clang/AST/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Common code shared among targets.
+//===----------------------------------------------------------------------===//
+
+static void Define(std::vector<char> &Buf, const char *Macro,
+ const char *Val = "1") {
+ const char *Def = "#define ";
+ Buf.insert(Buf.end(), Def, Def+strlen(Def));
+ Buf.insert(Buf.end(), Macro, Macro+strlen(Macro));
+ Buf.push_back(' ');
+ Buf.insert(Buf.end(), Val, Val+strlen(Val));
+ Buf.push_back('\n');
+}
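+// For example, Define(Buf, "__ELF__") appends "#define __ELF__ 1\n" to Buf.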
+
+/// DefineStd - Define a macro name and standard variants. For example if
+/// MacroName is "unix", then this will define "__unix", "__unix__", and "unix"
+/// when in GNU mode.
+static void DefineStd(std::vector<char> &Buf, const char *MacroName,
+ const LangOptions &Opts) {
+ assert(MacroName[0] != '_' && "Identifier should be in the user's namespace");
+
+ // If in GNU mode (e.g. -std=gnu99 but not -std=c99) define the raw identifier
+ // in the user's namespace.
+ if (Opts.GNUMode)
+ Define(Buf, MacroName);
+
+ // Define __unix.
+ llvm::SmallString<20> TmpStr;
+ TmpStr = "__";
+ TmpStr += MacroName;
+ Define(Buf, TmpStr.c_str());
+
+ // Define __unix__.
+ TmpStr += "__";
+ Define(Buf, TmpStr.c_str());
+}
+
+//===----------------------------------------------------------------------===//
+// Defines specific to certain operating systems.
+//===----------------------------------------------------------------------===//
+
+static void getSolarisDefines(const LangOptions &Opts, std::vector<char> &Defs) {
+ DefineStd(Defs, "sun", Opts);
+ DefineStd(Defs, "unix", Opts);
+ Define(Defs, "__ELF__");
+ Define(Defs, "__svr4__");
+ Define(Defs, "__SVR4");
+}
+
+static void getFreeBSDDefines(const LangOptions &Opts, bool is64Bit,
+ const char *Triple, std::vector<char> &Defs) {
+ // FreeBSD defines; list based off of gcc output
+
+ const char *FreeBSD = strstr(Triple, "-freebsd");
+ FreeBSD += strlen("-freebsd");
+ char release[] = "X";
+ release[0] = FreeBSD[0];
+ char version[] = "X00001";
+ version[0] = FreeBSD[0];
+
+ Define(Defs, "__FreeBSD__", release);
+ Define(Defs, "__FreeBSD_cc_version", version);
+ Define(Defs, "__KPRINTF_ATTRIBUTE__");
+ DefineStd(Defs, "unix", Opts);
+ Define(Defs, "__ELF__", "1");
+ if (is64Bit) {
+ Define(Defs, "__LP64__");
+ }
+}
+
+static void getDragonFlyDefines(const LangOptions &Opts,
+ std::vector<char> &Defs) {
+ // DragonFly defines; list based off of gcc output
+ Define(Defs, "__DragonFly__");
+ Define(Defs, "__DragonFly_cc_version", "100001");
+ Define(Defs, "__ELF__");
+ Define(Defs, "__KPRINTF_ATTRIBUTE__");
+ Define(Defs, "__tune_i386__");
+ DefineStd(Defs, "unix", Opts);
+}
+
+static void getLinuxDefines(const LangOptions &Opts, std::vector<char> &Defs) {
+ // Linux defines; list based off of gcc output
+ DefineStd(Defs, "unix", Opts);
+ DefineStd(Defs, "linux", Opts);
+ Define(Defs, "__gnu_linux__");
+ Define(Defs, "__ELF__", "1");
+}
+
+/// getDarwinNumber - Parse the 'darwin number' out of the specified target
+/// triple. For example, if we have darwin8.5 return 8,5,0. If any entry is
+/// not defined, return 0's. Return true if we have -darwin in the string or
+/// false otherwise.
+static bool getDarwinNumber(const char *Triple, unsigned &Maj, unsigned &Min, unsigned &Revision) {
+ Maj = Min = Revision = 0;
+ const char *Darwin = strstr(Triple, "-darwin");
+ if (Darwin == 0) return false;
+
+ Darwin += strlen("-darwin");
+ if (Darwin[0] < '0' || Darwin[0] > '9')
+ return true;
+
+ Maj = Darwin[0]-'0';
+ ++Darwin;
+
+ // Handle "darwin11".
+ if (Maj == 1 && Darwin[0] >= '0' && Darwin[0] <= '9') {
+ Maj = Maj*10 + (Darwin[0] - '0');
+ ++Darwin;
+ }
+
+ // Handle minor version: 10.4.9 -> darwin8.9 -> "1049"
+ if (Darwin[0] != '.')
+ return true;
+
+ ++Darwin;
+ if (Darwin[0] < '0' || Darwin[0] > '9')
+ return true;
+
+ Min = Darwin[0]-'0';
+ ++Darwin;
+
+ // Handle 10.4.11 -> darwin8.11
+ if (Min == 1 && Darwin[0] >= '0' && Darwin[0] <= '9') {
+ Min = Min*10 + (Darwin[0] - '0');
+ ++Darwin;
+ }
+
+ // Handle revision darwin8.9.1
+ if (Darwin[0] != '.')
+ return true;
+
+ ++Darwin;
+ if (Darwin[0] < '0' || Darwin[0] > '9')
+ return true;
+
+ Revision = Darwin[0]-'0';
+ ++Darwin;
+
+ if (Revision == 1 && Darwin[0] >= '0' && Darwin[0] <= '9') {
+ Revision = Revision*10 + (Darwin[0] - '0');
+ ++Darwin;
+ }
+
+ return true;
+}
+
+static void getDarwinDefines(std::vector<char> &Defs, const LangOptions &Opts) {
+ Define(Defs, "__APPLE__");
+ Define(Defs, "__MACH__");
+ Define(Defs, "OBJC_NEW_PROPERTIES");
+
+ // __weak is always defined, for use in blocks and with objc pointers.
+ Define(Defs, "__weak", "__attribute__((objc_gc(weak)))");
+
+ // Darwin defines __strong even in C mode (just to nothing).
+ if (!Opts.ObjC1 || Opts.getGCMode() == LangOptions::NonGC)
+ Define(Defs, "__strong", "");
+ else
+ Define(Defs, "__strong", "__attribute__((objc_gc(strong)))");
+}
+
+static void getDarwinOSXDefines(std::vector<char> &Defs, const char *Triple) {
+ // Figure out which "darwin number" the target triple corresponds to.
+ // "darwin9" -> 10.5.
+ unsigned Maj, Min, Rev;
+ if (getDarwinNumber(Triple, Maj, Min, Rev)) {
+ char MacOSXStr[] = "1000";
+ if (Maj >= 4 && Maj <= 13) { // 10.0-10.9
+ // darwin7 -> 1030, darwin8 -> 1040, darwin9 -> 1050, etc.
+ MacOSXStr[2] = '0' + Maj-4;
+ }
+
+ // Handle minor version: 10.4.9 -> darwin8.9 -> "1049"
+ // Cap 10.4.11 -> darwin8.11 -> "1049"
+ MacOSXStr[3] = std::min(Min, 9U)+'0';
+ Define(Defs, "__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", MacOSXStr);
+ }
+}
+
+static void getDarwinIPhoneOSDefines(std::vector<char> &Defs,
+ const char *Triple) {
+ // Figure out which "darwin number" the target triple corresponds to.
+ // "darwin9" -> 10.5.
+ unsigned Maj, Min, Rev;
+ if (getDarwinNumber(Triple, Maj, Min, Rev)) {
+ // When targeting iPhone OS, interpret the minor version and
+ // revision as the iPhone OS version.
+ char iPhoneOSStr[] = "10000";
+ if (Min >= 2 && Min <= 9) { // iPhone OS 2.0-9.0
+ // darwin9.2.0 -> 20000, darwin9.3.0 -> 30000, etc.
+ iPhoneOSStr[0] = '0' + Min;
+ }
+
+ // Handle minor version: 2.2 -> darwin9.2.2 -> 20200
+ iPhoneOSStr[2] = std::min(Rev, 9U)+'0';
+ Define(Defs, "__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
+ iPhoneOSStr);
+ }
+}
+
+/// GetDarwinLanguageOptions - Set the default language options for darwin.
+static void GetDarwinLanguageOptions(LangOptions &Opts,
+ const char *Triple) {
+ Opts.NeXTRuntime = true;
+
+ unsigned Maj, Min, Rev;
+ if (!getDarwinNumber(Triple, Maj, Min, Rev))
+ return;
+
+ // Blocks default to on for 10.6 (darwin10) and beyond.
+ // The non-fragile Objective-C ABI also defaults to on for x86_64 on 10.5+.
+ if (Maj > 9)
+ Opts.Blocks = 1;
+
+ if (Maj >= 9 && Opts.ObjC1 && !strncmp(Triple, "x86_64", 6))
+ Opts.ObjCNonFragileABI = 1;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Specific target implementations.
+//===----------------------------------------------------------------------===//
+
+namespace {
+// PPC abstract base class
+class PPCTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+
+public:
+ PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ CharIsSigned = false;
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const;
+
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ // This is the right definition for ABI/V4: System V.4/eabi.
+ /*return "typedef struct __va_list_tag {"
+ " unsigned char gpr;"
+ " unsigned char fpr;"
+ " unsigned short reserved;"
+ " void* overflow_arg_area;"
+ " void* reg_save_area;"
+ "} __builtin_va_list[1];";*/
+ }
+ virtual const char *getTargetPrefix() const {
+ return "ppc";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ case 'O': // Zero
+ return true;
+ case 'b': // Base register
+ case 'f': // Floating point register
+ Info.setAllowsRegister();
+ return true;
+ }
+ }
+ virtual const char *getClobbers() const {
+ return "";
+ }
+};
+
+const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#include "clang/AST/PPCBuiltins.def"
+};
+
+
+/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
+/// #defines that are not tied to a specific subtarget.
+void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defs) const {
+ // Target identification.
+ Define(Defs, "__ppc__");
+ Define(Defs, "_ARCH_PPC");
+ Define(Defs, "__POWERPC__");
+ if (PointerWidth == 64) {
+ Define(Defs, "_ARCH_PPC64");
+ Define(Defs, "_LP64");
+ Define(Defs, "__LP64__");
+ Define(Defs, "__ppc64__");
+ } else {
+ Define(Defs, "__ppc__");
+ }
+
+ // Target properties.
+ Define(Defs, "_BIG_ENDIAN");
+ Define(Defs, "__BIG_ENDIAN__");
+
+ // Subtarget options.
+ Define(Defs, "__NATURAL_ALIGNMENT__");
+ Define(Defs, "__REGISTER_PREFIX__", "");
+
+ // FIXME: Should be controlled by command line option.
+ Define(Defs, "__LONG_DOUBLE_128__");
+}
+
+
+const char * const PPCTargetInfo::GCCRegNames[] = {
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "mq", "lr", "ctr", "ap",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "xer",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "vrsave", "vscr",
+ "spe_acc", "spefscr",
+ "sfp"
+};
+
+void PPCTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
+ // While some of these aliases do map to different registers
+ // they still share the same register name.
+ { { "cc", "cr0", "fr0", "r0", "v0"}, "0" },
+ { { "cr1", "fr1", "r1", "sp", "v1"}, "1" },
+ { { "cr2", "fr2", "r2", "toc", "v2"}, "2" },
+ { { "cr3", "fr3", "r3", "v3"}, "3" },
+ { { "cr4", "fr4", "r4", "v4"}, "4" },
+ { { "cr5", "fr5", "r5", "v5"}, "5" },
+ { { "cr6", "fr6", "r6", "v6"}, "6" },
+ { { "cr7", "fr7", "r7", "v7"}, "7" },
+ { { "fr8", "r8", "v8"}, "8" },
+ { { "fr9", "r9", "v9"}, "9" },
+ { { "fr10", "r10", "v10"}, "10" },
+ { { "fr11", "r11", "v11"}, "11" },
+ { { "fr12", "r12", "v12"}, "12" },
+ { { "fr13", "r13", "v13"}, "13" },
+ { { "fr14", "r14", "v14"}, "14" },
+ { { "fr15", "r15", "v15"}, "15" },
+ { { "fr16", "r16", "v16"}, "16" },
+ { { "fr17", "r17", "v17"}, "17" },
+ { { "fr18", "r18", "v18"}, "18" },
+ { { "fr19", "r19", "v19"}, "19" },
+ { { "fr20", "r20", "v20"}, "20" },
+ { { "fr21", "r21", "v21"}, "21" },
+ { { "fr22", "r22", "v22"}, "22" },
+ { { "fr23", "r23", "v23"}, "23" },
+ { { "fr24", "r24", "v24"}, "24" },
+ { { "fr25", "r25", "v25"}, "25" },
+ { { "fr26", "r26", "v26"}, "26" },
+ { { "fr27", "r27", "v27"}, "27" },
+ { { "fr28", "r28", "v28"}, "28" },
+ { { "fr29", "r29", "v29"}, "29" },
+ { { "fr30", "r30", "v30"}, "30" },
+ { { "fr31", "r31", "v31"}, "31" },
+};
+
+void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+} // end anonymous namespace.
+
+namespace {
+class PPC32TargetInfo : public PPCTargetInfo {
+public:
+ PPC32TargetInfo(const std::string& triple) : PPCTargetInfo(triple) {
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128";
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+class PPC64TargetInfo : public PPCTargetInfo {
+public:
+ PPC64TargetInfo(const std::string& triple) : PPCTargetInfo(triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128";
+ }
+};
+} // end anonymous namespace.
+
+
+namespace {
+class DarwinPPCTargetInfo : public PPC32TargetInfo {
+public:
+ DarwinPPCTargetInfo(const std::string& triple) : PPC32TargetInfo(triple) {}
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ PPC32TargetInfo::getTargetDefines(Opts, Defines);
+ getDarwinDefines(Defines, Opts);
+ getDarwinOSXDefines(Defines, getTargetTriple());
+ }
+
+ /// getDefaultLangOptions - Allow the target to specify default settings for
+ /// various language options. These may be overridden by command line
+ /// options.
+ virtual void getDefaultLangOptions(LangOptions &Opts) {
+ GetDarwinLanguageOptions(Opts, getTargetTriple());
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+class DarwinPPC64TargetInfo : public PPC64TargetInfo {
+public:
+ DarwinPPC64TargetInfo(const std::string& triple) : PPC64TargetInfo(triple) {}
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ PPC64TargetInfo::getTargetDefines(Opts, Defines);
+ getDarwinDefines(Defines, Opts);
+ getDarwinOSXDefines(Defines, getTargetTriple());
+ }
+
+ /// getDefaultLangOptions - Allow the target to specify default settings for
+ /// various language options. These may be overridden by command line
+ /// options.
+ virtual void getDefaultLangOptions(LangOptions &Opts) {
+ GetDarwinLanguageOptions(Opts, getTargetTriple());
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+// Namespace for x86 abstract base class
+const Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, false },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER, false },
+#include "clang/AST/X86Builtins.def"
+};
+
+const char *GCCRegNames[] = {
+ "ax", "dx", "cx", "bx", "si", "di", "bp", "sp",
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+ "argp", "flags", "fspr", "dirflag", "frame",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
+};
+
+const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ { { "al", "ah", "eax", "rax" }, "ax" },
+ { { "bl", "bh", "ebx", "rbx" }, "bx" },
+ { { "cl", "ch", "ecx", "rcx" }, "cx" },
+ { { "dl", "dh", "edx", "rdx" }, "dx" },
+ { { "esi", "rsi" }, "si" },
+ { { "edi", "rdi" }, "di" },
+ { { "esp", "rsp" }, "sp" },
+ { { "ebp", "rbp" }, "bp" },
+};
+
+// X86 target abstract base class; x86-32 and x86-64 are very close, so
+// most of the implementation can be shared.
+class X86TargetInfo : public TargetInfo {
+ enum X86SSEEnum {
+ NoMMXSSE, MMX, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42
+ } SSELevel;
+public:
+ X86TargetInfo(const std::string& triple)
+ : TargetInfo(triple), SSELevel(NoMMXSSE) {
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+ virtual const char *getTargetPrefix() const {
+ return "x86";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const;
+ virtual std::string convertConstraint(const char Constraint) const;
+ virtual const char *getClobbers() const {
+ return "~{dirflag},~{fpsr},~{flags}";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const;
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ const std::string &Name,
+ bool Enabled) const;
+ virtual void getDefaultFeatures(const std::string &CPU,
+ llvm::StringMap<bool> &Features) const;
+ virtual void HandleTargetFeatures(const llvm::StringMap<bool> &Features);
+};
+
+void X86TargetInfo::getDefaultFeatures(const std::string &CPU,
+ llvm::StringMap<bool> &Features) const {
+ // FIXME: This should not be here.
+ Features["3dnow"] = false;
+ Features["3dnowa"] = false;
+ Features["mmx"] = false;
+ Features["sse"] = false;
+ Features["sse2"] = false;
+ Features["sse3"] = false;
+ Features["ssse3"] = false;
+ Features["sse41"] = false;
+ Features["sse42"] = false;
+
+ // LLVM does not currently recognize this.
+ // Features["sse4a"] = false;
+
+ // FIXME: This *really* should not be here.
+
+ // X86_64 always has SSE2.
+ if (PointerWidth == 64)
+ Features["sse2"] = Features["sse"] = Features["mmx"] = true;
+
+ if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" ||
+ CPU == "pentium" || CPU == "i686" || CPU == "pentiumpro")
+ ;
+ else if (CPU == "pentium-mmx" || CPU == "pentium2")
+ setFeatureEnabled(Features, "mmx", true);
+ else if (CPU == "pentium3")
+ setFeatureEnabled(Features, "sse", true);
+ else if (CPU == "pentium-m" || CPU == "pentium4" || CPU == "x86-64")
+ setFeatureEnabled(Features, "sse2", true);
+ else if (CPU == "yonah" || CPU == "prescott" || CPU == "nocona")
+ setFeatureEnabled(Features, "sse3", true);
+ else if (CPU == "core2")
+ setFeatureEnabled(Features, "ssse3", true);
+ else if (CPU == "penryn") {
+ setFeatureEnabled(Features, "sse4", true);
+ Features["sse42"] = false;
+ } else if (CPU == "atom")
+ setFeatureEnabled(Features, "sse3", true);
+ else if (CPU == "corei7")
+ setFeatureEnabled(Features, "sse4", true);
+ else if (CPU == "k6" || CPU == "winchip-c6")
+ setFeatureEnabled(Features, "mmx", true);
+ else if (CPU == "k6-2" || CPU == "k6-3" || CPU == "athlon" ||
+ CPU == "athlon-tbird" || CPU == "winchip2" || CPU == "c3") {
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "3dnow", true);
+ } else if (CPU == "athlon-4" || CPU == "athlon-xp" || CPU == "athlon-mp") {
+ setFeatureEnabled(Features, "sse", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ } else if (CPU == "k8" || CPU == "opteron" || CPU == "athlon64" ||
+ CPU == "athlon-fx") {
+ setFeatureEnabled(Features, "sse2", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ } else if (CPU == "c3-2")
+ setFeatureEnabled(Features, "sse", true);
+}
+
+bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ const std::string &Name,
+ bool Enabled) const {
+ // FIXME: This *really* should not be here.
+ if (!Features.count(Name) && Name != "sse4")
+ return false;
+
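+ // Enabling a level implies everything below it (e.g. "sse3" also turns on
+ // sse2, sse and mmx); disabling a level also disables everything above it.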
+ if (Enabled) {
+ if (Name == "mmx")
+ Features["mmx"] = true;
+ else if (Name == "sse")
+ Features["mmx"] = Features["sse"] = true;
+ else if (Name == "sse2")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = true;
+ else if (Name == "sse3")
+ Features["mmx"] = Features["sse"] = Features["sse2"] =
+ Features["sse3"] = true;
+ else if (Name == "ssse3")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = true;
+ else if (Name == "sse4")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] = true;
+ else if (Name == "3dnow")
+ Features["3dnowa"] = true;
+ else if (Name == "3dnowa")
+ Features["3dnow"] = Features["3dnowa"] = true;
+ } else {
+ if (Name == "mmx")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse")
+ Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse2")
+ Features["sse2"] = Features["sse3"] = Features["ssse3"] =
+ Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse3")
+ Features["sse3"] = Features["ssse3"] = Features["sse41"] =
+ Features["sse42"] = false;
+ else if (Name == "ssse3")
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse4")
+ Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "3dnow")
+ Features["3dnow"] = Features["3dnowa"] = false;
+ else if (Name == "3dnowa")
+ Features["3dnowa"] = false;
+ }
+
+ return true;
+}
+
+/// HandleTargetFeatures - Perform initialization based on the user-configured
+/// set of features.
+void X86TargetInfo::HandleTargetFeatures(const llvm::StringMap<bool>&Features) {
+ if (Features.lookup("sse42"))
+ SSELevel = SSE42;
+ else if (Features.lookup("sse41"))
+ SSELevel = SSE41;
+ else if (Features.lookup("ssse3"))
+ SSELevel = SSSE3;
+ else if (Features.lookup("sse3"))
+ SSELevel = SSE3;
+ else if (Features.lookup("sse2"))
+ SSELevel = SSE2;
+ else if (Features.lookup("sse"))
+ SSELevel = SSE1;
+ else if (Features.lookup("mmx"))
+ SSELevel = MMX;
+}
+
+/// X86TargetInfo::getTargetDefines - Return a set of the X86-specific #defines
+/// that are not tied to a specific subtarget.
+void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defs) const {
+ // Target identification.
+ if (PointerWidth == 64) {
+ Define(Defs, "_LP64");
+ Define(Defs, "__LP64__");
+ Define(Defs, "__amd64__");
+ Define(Defs, "__amd64");
+ Define(Defs, "__x86_64");
+ Define(Defs, "__x86_64__");
+ } else {
+ DefineStd(Defs, "i386", Opts);
+ }
+
+ // Target properties.
+ Define(Defs, "__LITTLE_ENDIAN__");
+
+ // Subtarget options.
+ Define(Defs, "__nocona");
+ Define(Defs, "__nocona__");
+ Define(Defs, "__tune_nocona__");
+ Define(Defs, "__REGISTER_PREFIX__", "");
+
+ // Define __NO_MATH_INLINES on linux/x86 so that we don't get inline
+ // functions in glibc header files that use FP Stack inline asm which the
+ // backend can't deal with (PR879).
+ Define(Defs, "__NO_MATH_INLINES");
+
+ // Each case intentionally falls through to the next: every SSE level also
+ // gets the defines for all of the lower levels.
+ switch (SSELevel) {
+ case SSE42:
+ Define(Defs, "__SSE4_2__");
+ case SSE41:
+ Define(Defs, "__SSE4_1__");
+ case SSSE3:
+ Define(Defs, "__SSSE3__");
+ case SSE3:
+ Define(Defs, "__SSE3__");
+ case SSE2:
+ Define(Defs, "__SSE2__");
+ Define(Defs, "__SSE2_MATH__"); // -mfp-math=sse always implied.
+ case SSE1:
+ Define(Defs, "__SSE__");
+ Define(Defs, "__SSE_MATH__"); // -mfp-math=sse always implied.
+ case MMX:
+ Define(Defs, "__MMX__");
+ case NoMMXSSE:
+ break;
+ }
+}
+
+
+bool
+X86TargetInfo::validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ case 'a': // eax.
+ case 'b': // ebx.
+ case 'c': // ecx.
+ case 'd': // edx.
+ case 'S': // esi.
+ case 'D': // edi.
+ case 'A': // edx:eax.
+ case 't': // top of floating point stack.
+ case 'u': // second from top of floating point stack.
+ case 'q': // Any register accessible as [r]l: a, b, c, and d.
+ case 'y': // Any MMX register.
+ case 'x': // Any SSE register.
+ case 'Q': // Any register accessible as [r]h: a, b, c, and d.
+ case 'e': // 32-bit signed integer constant for use with zero-extending
+ // x86_64 instructions.
+ case 'Z': // 32-bit unsigned integer constant for use with zero-extending
+ // x86_64 instructions.
+ case 'N': // unsigned 8-bit integer constant for use with in and out
+ // instructions.
+ Info.setAllowsRegister();
+ return true;
+ }
+}
+
+std::string
+X86TargetInfo::convertConstraint(const char Constraint) const {
+ switch (Constraint) {
+ case 'a': return std::string("{ax}");
+ case 'b': return std::string("{bx}");
+ case 'c': return std::string("{cx}");
+ case 'd': return std::string("{dx}");
+ case 'S': return std::string("{si}");
+ case 'D': return std::string("{di}");
+ case 't': // top of floating point stack.
+ return std::string("{st}");
+ case 'u': // second from top of floating point stack.
+ return std::string("{st(1)}"); // second from top of floating point stack.
+ default:
+ return std::string(1, Constraint);
+ }
+}
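+
+// Illustrative sketch, not part of the original file: for a hypothetical
+// statement such as
+//
+//   int v; __asm__("movl $1, %0" : "=a"(v));
+//
+// validateAsmConstraint above accepts 'a' and convertConstraint rewrites it
+// to "{ax}", so the constraint string seen at the LLVM IR level becomes
+// roughly "={ax}".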
+} // end anonymous namespace
+
+namespace {
+// X86-32 generic target
+class X86_32TargetInfo : public X86TargetInfo {
+public:
+ X86_32TargetInfo(const std::string& triple) : X86TargetInfo(triple) {
+ DoubleAlign = LongLongAlign = 32;
+ LongDoubleWidth = 96;
+ LongDoubleAlign = 32;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:32:32";
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ RegParmMax = 3;
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Darwin (OS X) target
+class DarwinI386TargetInfo : public X86_32TargetInfo {
+public:
+ DarwinI386TargetInfo(const std::string& triple) : X86_32TargetInfo(triple) {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:128:128";
+ TLSSupported = false;
+ }
+
+ virtual const char *getStringSymbolPrefix(bool IsConstant) const {
+ return IsConstant ? "\01LC" : "\01lC";
+ }
+
+ virtual const char *getUnicodeStringSymbolPrefix() const {
+ return "__utf16_string_";
+ }
+
+ virtual const char *getUnicodeStringSection() const {
+ return "__TEXT,__ustring";
+ }
+
+ virtual const char *getCFStringSymbolPrefix() const {
+ return "\01LC";
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ getDarwinDefines(Defines, Opts);
+ getDarwinOSXDefines(Defines, getTargetTriple());
+ }
+
+ /// getDefaultLangOptions - Allow the target to specify default settings for
+ /// various language options. These may be overridden by command line
+ /// options.
+ virtual void getDefaultLangOptions(LangOptions &Opts) {
+ GetDarwinLanguageOptions(Opts, getTargetTriple());
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 FreeBSD target
+class FreeBSDX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ FreeBSDX86_32TargetInfo(const std::string& triple) :
+ X86_32TargetInfo(triple) { }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ getFreeBSDDefines(Opts, 0, getTargetTriple(), Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 DragonFly target
+class DragonFlyX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ DragonFlyX86_32TargetInfo(const std::string& triple) :
+ X86_32TargetInfo(triple) { }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ getDragonFlyDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Linux target
+class LinuxX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ LinuxX86_32TargetInfo(const std::string& triple) : X86_32TargetInfo(triple) {
+ UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ getLinuxDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Solaris target
+class SolarisX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ SolarisX86_32TargetInfo(const std::string& triple) : X86_32TargetInfo(triple) {
+ UserLabelPrefix = "";
+ WCharType = SignedLong;
+ // FIXME: WIntType should be SignedLong
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ getSolarisDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace
+
+
+namespace {
+// x86-32 Windows target
+class WindowsX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ WindowsX86_32TargetInfo(const std::string& triple)
+ : X86_32TargetInfo(triple) {
+ TLSSupported = false;
+ // FIXME: Fix wchar_t.
+ // FIXME: We should probably enable -fms-extensions by default for
+ // this target.
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Defines);
+ // This list is based on the list of things MinGW defines.
+ Define(Defines, "_WIN32");
+ DefineStd(Defines, "WIN32", Opts);
+ DefineStd(Defines, "WINNT", Opts);
+ Define(Defines, "_X86_");
+ Define(Defines, "__MSVCRT__");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 generic target
+class X86_64TargetInfo : public X86TargetInfo {
+public:
+ X86_64TargetInfo(const std::string &triple) : X86TargetInfo(triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ DoubleAlign = LongLongAlign = 64;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ RegParmMax = 6;
+
+ DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:128:128";
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef struct __va_list_tag {"
+ " unsigned gp_offset;"
+ " unsigned fp_offset;"
+ " void* overflow_arg_area;"
+ " void* reg_save_area;"
+ "} __builtin_va_list[1];";
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 FreeBSD target
+class FreeBSDX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ FreeBSDX86_64TargetInfo(const std::string &triple)
+ : X86_64TargetInfo(triple) {}
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_64TargetInfo::getTargetDefines(Opts, Defines);
+ getFreeBSDDefines(Opts, 1, getTargetTriple(), Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 Linux target
+class LinuxX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ LinuxX86_64TargetInfo(const std::string& triple) : X86_64TargetInfo(triple) {
+ UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_64TargetInfo::getTargetDefines(Opts, Defines);
+ getLinuxDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 Solaris target
+class SolarisX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ SolarisX86_64TargetInfo(const std::string& triple) : X86_64TargetInfo(triple) {
+ UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_64TargetInfo::getTargetDefines(Opts, Defines);
+ getSolarisDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 Darwin (OS X) target
+class DarwinX86_64TargetInfo : public X86_64TargetInfo {
+public:
+ DarwinX86_64TargetInfo(const std::string& triple) : X86_64TargetInfo(triple) {
+ TLSSupported = false;
+ }
+
+ virtual const char *getStringSymbolPrefix(bool IsConstant) const {
+ return IsConstant ? "\01LC" : "\01lC";
+ }
+
+ virtual const char *getUnicodeStringSymbolPrefix() const {
+ return "__utf16_string_";
+ }
+
+ virtual const char *getUnicodeStringSection() const {
+ return "__TEXT,__ustring";
+ }
+
+ virtual const char *getCFStringSymbolPrefix() const {
+ return "\01L_unnamed_cfstring_";
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ X86_64TargetInfo::getTargetDefines(Opts, Defines);
+ getDarwinDefines(Defines, Opts);
+ getDarwinOSXDefines(Defines, getTargetTriple());
+ }
+
+ /// getDefaultLangOptions - Allow the target to specify default settings for
+ /// various language options. These may be overridden by command line
+ /// options.
+ virtual void getDefaultLangOptions(LangOptions &Opts) {
+ GetDarwinLanguageOptions(Opts, getTargetTriple());
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+class ARMTargetInfo : public TargetInfo {
+ enum {
+ Armv4t,
+ Armv5,
+ Armv6,
+ XScale
+ } ArmArch;
+public:
+ ARMTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ // FIXME: Are the defaults correct for ARM?
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:64";
+ if (triple.find("arm-") == 0 || triple.find("armv6-") == 0)
+ ArmArch = Armv6;
+ else if (triple.find("armv5-") == 0)
+ ArmArch = Armv5;
+ else if (triple.find("armv4t-") == 0)
+ ArmArch = Armv4t;
+ else if (triple.find("xscale-") == 0)
+ ArmArch = XScale;
+ else if (triple.find("armv") == 0) {
+ // FIXME: fuzzy match for other random weird arm triples. This is useful
+ // for the static analyzer and other clients, but probably should be
+ // re-evaluated when codegen is brought up.
+ ArmArch = Armv6;
+ }
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defs) const {
+ // Target identification.
+ Define(Defs, "__arm");
+ Define(Defs, "__arm__");
+
+ // Target properties.
+ Define(Defs, "__LITTLE_ENDIAN__");
+
+ // Subtarget options.
+ if (ArmArch == Armv6) {
+ Define(Defs, "__ARM_ARCH_6K__");
+ Define(Defs, "__THUMB_INTERWORK__");
+ } else if (ArmArch == Armv5) {
+ Define(Defs, "__ARM_ARCH_5TEJ__");
+ Define(Defs, "__THUMB_INTERWORK__");
+ Define(Defs, "__SOFTFP__");
+ } else if (ArmArch == Armv4t) {
+ Define(Defs, "__ARM_ARCH_4T__");
+ Define(Defs, "__SOFTFP__");
+ } else if (ArmArch == XScale) {
+ Define(Defs, "__ARM_ARCH_5TE__");
+ Define(Defs, "__XSCALE__");
+ Define(Defs, "__SOFTFP__");
+ }
+ Define(Defs, "__ARMEL__");
+ Define(Defs, "__APCS_32__");
+ Define(Defs, "__VFP_FP__");
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement.
+ Records = 0;
+ NumRecords = 0;
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+ virtual const char *getTargetPrefix() const {
+ return "arm";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ // FIXME: Implement.
+ Names = 0;
+ NumNames = 0;
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ // FIXME: Implement.
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ // FIXME: Check if this is complete
+ switch (*Name) {
+ default:
+ case 'l': // r0-r7
+ case 'h': // r8-r15
+ case 'w': // VFP Floating point register single precision
+ case 'P': // VFP Floating point register double precision
+ Info.setAllowsRegister();
+ return true;
+ }
+ return false;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+ }
+};
+} // end anonymous namespace.
+
+
+namespace {
+class DarwinARMTargetInfo : public ARMTargetInfo {
+public:
+ DarwinARMTargetInfo(const std::string& triple) : ARMTargetInfo(triple) {
+ TLSSupported = false;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ ARMTargetInfo::getTargetDefines(Opts, Defines);
+ getDarwinDefines(Defines, Opts);
+ getDarwinIPhoneOSDefines(Defines, getTargetTriple());
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+// arm FreeBSD target
+class FreeBSDARMTargetInfo : public ARMTargetInfo {
+public:
+ FreeBSDARMTargetInfo(const std::string& triple) : ARMTargetInfo(triple) {}
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ ARMTargetInfo::getTargetDefines(Opts, Defines);
+ getFreeBSDDefines(Opts, 0, getTargetTriple(), Defines);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class SparcV8TargetInfo : public TargetInfo {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char * const GCCRegNames[];
+public:
+ SparcV8TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ // FIXME: Support Sparc quad-precision long double?
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ DefineStd(Defines, "sparc", Opts);
+ Define(Defines, "__sparcv8");
+ Define(Defines, "__REGISTER_PREFIX__", "");
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement!
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef void* __builtin_va_list;";
+ }
+ virtual const char *getTargetPrefix() const {
+ return "sparc";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ // FIXME: Implement!
+ return false;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Implement!
+ return "";
+ }
+};
+
+const char * const SparcV8TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+};
+
+void SparcV8TargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias SparcV8TargetInfo::GCCRegAliases[] = {
+ { { "g0" }, "r0" },
+ { { "g1" }, "r1" },
+ { { "g2" }, "r2" },
+ { { "g3" }, "r3" },
+ { { "g4" }, "r4" },
+ { { "g5" }, "r5" },
+ { { "g6" }, "r6" },
+ { { "g7" }, "r7" },
+ { { "o0" }, "r8" },
+ { { "o1" }, "r9" },
+ { { "o2" }, "r10" },
+ { { "o3" }, "r11" },
+ { { "o4" }, "r12" },
+ { { "o5" }, "r13" },
+ { { "o6", "sp" }, "r14" },
+ { { "o7" }, "r15" },
+ { { "l0" }, "r16" },
+ { { "l1" }, "r17" },
+ { { "l2" }, "r18" },
+ { { "l3" }, "r19" },
+ { { "l4" }, "r20" },
+ { { "l5" }, "r21" },
+ { { "l6" }, "r22" },
+ { { "l7" }, "r23" },
+ { { "i0" }, "r24" },
+ { { "i1" }, "r25" },
+ { { "i2" }, "r26" },
+ { { "i3" }, "r27" },
+ { { "i4" }, "r28" },
+ { { "i5" }, "r29" },
+ { { "i6", "fp" }, "r30" },
+ { { "i7" }, "r31" },
+};
+
+void SparcV8TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+} // end anonymous namespace.
+
+namespace {
+class SolarisSparcV8TargetInfo : public SparcV8TargetInfo {
+public:
+ SolarisSparcV8TargetInfo(const std::string& triple) :
+ SparcV8TargetInfo(triple) {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ WCharType = SignedLong;
+ // FIXME: WIntType should be SignedLong
+ UserLabelPrefix = "";
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ SparcV8TargetInfo::getTargetDefines(Opts, Defines);
+ getSolarisDefines(Opts, Defines);
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+ class PIC16TargetInfo : public TargetInfo {
+ public:
+ PIC16TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ TLSSupported = false;
+ IntWidth = 16;
+ LongWidth = LongLongWidth = 32;
+ IntMaxTWidth = 32;
+ PointerWidth = 16;
+ IntAlign = 8;
+ LongAlign = LongLongAlign = 8;
+ PointerAlign = 8;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ IntPtrType = SignedShort;
+ PtrDiffType = SignedInt;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 32;
+ DoubleAlign = 32;
+ LongDoubleWidth = 32;
+ LongDoubleAlign = 32;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEsingle;
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle;
+ DescriptionString = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8-f32:32:32";
+ }
+ virtual uint64_t getPointerWidthV(unsigned AddrSpace) const { return 16; }
+ virtual uint64_t getPointerAlignV(unsigned AddrSpace) const { return 8; }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ Define(Defines, "__pic16");
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {}
+ virtual const char *getVAListDeclaration() const { return "";}
+ virtual const char *getClobbers() const {return "";}
+ virtual const char *getTargetPrefix() const {return "pic16";}
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {}
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ return true;
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {}
+ virtual bool useGlobalsForAutomaticVariables() const {return true;}
+ };
+}
+
+namespace {
+ class MSP430TargetInfo : public TargetInfo {
+ static const char * const GCCRegNames[];
+ public:
+ MSP430TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ TLSSupported = false;
+ IntWidth = 16;
+ LongWidth = LongLongWidth = 32;
+ IntMaxTWidth = 32;
+ PointerWidth = 16;
+ IntAlign = 8;
+ LongAlign = LongLongAlign = 8;
+ PointerAlign = 8;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ IntPtrType = SignedShort;
+ PtrDiffType = SignedInt;
+ DescriptionString = "e-p:16:8:8-i8:8:8-i16:8:8-i32:8:8";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ std::vector<char> &Defines) const {
+ Define(Defines, "MSP430");
+ Define(Defines, "__MSP430__");
+ // FIXME: defines for different 'flavours' of MCU
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement.
+ Records = 0;
+ NumRecords = 0;
+ }
+ virtual const char *getTargetPrefix() const {
+ return "msp430";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ // No aliases.
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ // FIXME: implement
+ return true;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+ }
+ virtual const char *getVAListDeclaration() const {
+ // FIXME: implement
+ return "typedef char* __builtin_va_list;";
+ }
+ };
+
+ const char * const MSP430TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+ };
+
+ void MSP430TargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Driver code
+//===----------------------------------------------------------------------===//
+
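+// Note on the check below: TT[1] - '3' < 6 accepts '3' through '8', i.e.
+// triples of the form i386-... through i886-... (characters that compare
+// below '3' also pass, since the difference goes negative).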
+static inline bool IsX86(const std::string& TT) {
+ return (TT.size() >= 5 && TT[0] == 'i' && TT[2] == '8' && TT[3] == '6' &&
+ TT[4] == '-' && TT[1] - '3' < 6);
+}
+
+/// CreateTargetInfo - Return the target info object for the specified target
+/// triple.
+TargetInfo* TargetInfo::CreateTargetInfo(const std::string &T) {
+ // OS detection; this isn't really anywhere near complete.
+ // Additions and corrections are welcome.
+ bool isDarwin = T.find("-darwin") != std::string::npos;
+ bool isDragonFly = T.find("-dragonfly") != std::string::npos;
+ bool isFreeBSD = T.find("-freebsd") != std::string::npos;
+ bool isSolaris = T.find("-solaris") != std::string::npos;
+ bool isLinux = T.find("-linux") != std::string::npos;
+ bool isWindows = T.find("-windows") != std::string::npos ||
+ T.find("-win32") != std::string::npos ||
+ T.find("-mingw") != std::string::npos;
+
+ if (T.find("ppc-") == 0 || T.find("powerpc-") == 0) {
+ if (isDarwin)
+ return new DarwinPPCTargetInfo(T);
+ return new PPC32TargetInfo(T);
+ }
+
+ if (T.find("ppc64-") == 0 || T.find("powerpc64-") == 0) {
+ if (isDarwin)
+ return new DarwinPPC64TargetInfo(T);
+ return new PPC64TargetInfo(T);
+ }
+
+ if (T.find("armv") == 0 || T.find("arm-") == 0 || T.find("xscale") == 0) {
+ if (isDarwin)
+ return new DarwinARMTargetInfo(T);
+ if (isFreeBSD)
+ return new FreeBSDARMTargetInfo(T);
+ return new ARMTargetInfo(T);
+ }
+
+ if (T.find("sparc-") == 0) {
+ if (isSolaris)
+ return new SolarisSparcV8TargetInfo(T);
+ return new SparcV8TargetInfo(T);
+ }
+
+ if (T.find("x86_64-") == 0 || T.find("amd64-") == 0) {
+ if (isDarwin)
+ return new DarwinX86_64TargetInfo(T);
+ if (isLinux)
+ return new LinuxX86_64TargetInfo(T);
+ if (isFreeBSD)
+ return new FreeBSDX86_64TargetInfo(T);
+ if (isSolaris)
+ return new SolarisX86_64TargetInfo(T);
+ return new X86_64TargetInfo(T);
+ }
+
+ if (T.find("pic16-") == 0)
+ return new PIC16TargetInfo(T);
+
+ if (T.find("msp430-") == 0)
+ return new MSP430TargetInfo(T);
+
+ if (IsX86(T)) {
+ if (isDarwin)
+ return new DarwinI386TargetInfo(T);
+ if (isLinux)
+ return new LinuxX86_32TargetInfo(T);
+ if (isDragonFly)
+ return new DragonFlyX86_32TargetInfo(T);
+ if (isFreeBSD)
+ return new FreeBSDX86_32TargetInfo(T);
+ if (isSolaris)
+ return new SolarisX86_32TargetInfo(T);
+ if (isWindows)
+ return new WindowsX86_32TargetInfo(T);
+ return new X86_32TargetInfo(T);
+ }
+
+ return NULL;
+}
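+
+// Illustrative sketch, not part of the original file: typical use from a
+// hypothetical caller that owns the returned object.
+//
+//   TargetInfo *Target = TargetInfo::CreateTargetInfo("i686-pc-linux-gnu");
+//   if (!Target) {
+//     // unknown or unsupported triple
+//   }
+//   ...
+//   delete Target;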
diff --git a/lib/Basic/TokenKinds.cpp b/lib/Basic/TokenKinds.cpp
new file mode 100644
index 0000000..4afeaf0
--- /dev/null
+++ b/lib/Basic/TokenKinds.cpp
@@ -0,0 +1,90 @@
+//===--- TokenKinds.cpp - Token Kinds Support -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenKind enum and support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TokenKinds.h"
+
+#include <cassert>
+using namespace clang;
+
+static const char * const TokNames[] = {
+#define TOK(X) #X,
+#define KEYWORD(X,Y) #X,
+#include "clang/Basic/TokenKinds.def"
+ 0
+};
+
+const char *tok::getTokenName(enum TokenKind Kind) {
+ assert(Kind < tok::NUM_TOKENS);
+ return TokNames[Kind];
+}
+
+const char *tok::getTokenSimpleSpelling(enum TokenKind Kind) {
+ switch (Kind) {
+ case tok::l_square: return "[";
+ case tok::r_square: return "]";
+ case tok::l_paren: return "(";
+ case tok::r_paren: return ")";
+ case tok::l_brace: return "{";
+ case tok::r_brace: return "}";
+ case tok::period: return ".";
+ case tok::ellipsis: return "...";
+ case tok::amp: return "&";
+ case tok::ampamp: return "&&";
+ case tok::ampequal: return "&=";
+ case tok::star: return "*";
+ case tok::starequal: return "*=";
+ case tok::plus: return "+";
+ case tok::plusplus: return "++";
+ case tok::plusequal: return "+=";
+ case tok::minus: return "-";
+ case tok::arrow: return "->";
+ case tok::minusminus: return "--";
+ case tok::minusequal: return "-=";
+ case tok::tilde: return "~";
+ case tok::exclaim: return "!";
+ case tok::exclaimequal: return "!=";
+ case tok::slash: return "/";
+ case tok::slashequal: return "/=";
+ case tok::percent: return "%";
+ case tok::percentequal: return "%=";
+ case tok::less: return "<";
+ case tok::lessless: return "<<";
+ case tok::lessequal: return "<=";
+ case tok::lesslessequal: return "<<=";
+ case tok::greater: return ">";
+ case tok::greatergreater: return ">>";
+ case tok::greaterequal: return ">=";
+ case tok::greatergreaterequal: return ">>=";
+ case tok::caret: return "^";
+ case tok::caretequal: return "^=";
+ case tok::pipe: return "|";
+ case tok::pipepipe: return "||";
+ case tok::pipeequal: return "|=";
+ case tok::question: return "?";
+ case tok::colon: return ":";
+ case tok::semi: return ";";
+ case tok::equal: return "=";
+ case tok::equalequal: return "==";
+ case tok::comma: return ",";
+ case tok::hash: return "#";
+ case tok::hashhash: return "##";
+ case tok::hashat: return "#@";
+ case tok::periodstar: return ".*";
+ case tok::arrowstar: return "->*";
+ case tok::coloncolon: return "::";
+ case tok::at: return "@";
+ default: break;
+ }
+
+ return 0;
+}
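+
+// Illustrative sketch, not part of the original file:
+//
+//   tok::getTokenName(tok::arrow)                 // "arrow"
+//   tok::getTokenSimpleSpelling(tok::arrow)       // "->"
+//   tok::getTokenSimpleSpelling(tok::identifier)  // 0 (no fixed spelling)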
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
new file mode 100644
index 0000000..e3da531
--- /dev/null
+++ b/lib/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_subdirectory(Headers)
+add_subdirectory(Basic)
+add_subdirectory(Lex)
+add_subdirectory(Parse)
+add_subdirectory(AST)
+add_subdirectory(Sema)
+add_subdirectory(CodeGen)
+add_subdirectory(Analysis)
+add_subdirectory(Rewrite)
+add_subdirectory(Driver)
+add_subdirectory(Frontend)
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..3de4612
--- /dev/null
+++ b/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,133 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+namespace llvm {
+ class Type;
+}
+
+namespace clang {
+ class ASTContext;
+
+ // FIXME: This is a layering issue if we want to move ABIInfo
+ // down. Fortunately CGFunctionInfo has no real tie to CodeGen.
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ }
+
+ /* FIXME: All of this stuff should be part of the target interface
+ somehow. It is currently here because it is not clear how to factor
+ the targets to support this, since the Targets currently live in a
+ layer below types n'stuff.
+ */
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ Direct, /// Pass the argument directly using the normal
+ /// converted LLVM type. Complex and structure types
+ /// are passed using first class aggregates.
+
+ Indirect, /// Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default
+ /// alignment).
+
+ Ignore, /// Ignore the argument (treat as void). Useful for
+ /// void and empty structs.
+
+ Coerce, /// Only valid for aggregate return types; the argument
+ /// should be accessed by coercion to a provided type.
+
+ Expand, /// Only valid for aggregate argument types. The
+ /// structure should be expanded into consecutive
+ /// arguments for its constituent fields. Currently
+ /// expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable
+ /// types.
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ const llvm::Type *TypeData;
+ unsigned UIntData;
+
+ ABIArgInfo(Kind K, const llvm::Type *TD=0,
+ unsigned UI=0) : TheKind(K),
+ TypeData(TD),
+ UIntData(UI) {}
+ public:
+ ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
+
+ static ABIArgInfo getDirect() {
+ return ABIArgInfo(Direct);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+ static ABIArgInfo getCoerce(const llvm::Type *T) {
+ return ABIArgInfo(Coerce, T);
+ }
+ static ABIArgInfo getIndirect(unsigned Alignment) {
+ return ABIArgInfo(Indirect, 0, Alignment);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isCoerce() const { return TheKind == Coerce; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ // Coerce accessors
+ const llvm::Type *getCoerceToType() const {
+ assert(TheKind == Coerce && "Invalid kind!");
+ return TypeData;
+ }
+
+ // ByVal accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ void dump() const;
+ };
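+
+ // Illustrative sketch, not part of the original file: the static
+ // constructors map one-to-one onto the kinds above, e.g.
+ //
+ //   ABIArgInfo::getCoerce(llvm::Type::Int64Ty)  // small aggregate as i64
+ //   ABIArgInfo::getIndirect(0)                  // hidden pointer, default align
+ //   ABIArgInfo::getIgnore()                     // void / empty struct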
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ virtual ~ABIInfo();
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
+ ASTContext &Ctx) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..ead689c
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,1037 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+using namespace clang;
+using namespace CodeGen;
+
+llvm::Constant *CodeGenFunction::
+BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
+ const llvm::StructType* Ty,
+ std::vector<HelperInfo> *NoteForHelper) {
+ const llvm::Type *UnsignedLongTy
+ = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *C;
+ std::vector<llvm::Constant*> Elts;
+
+ // reserved
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size);
+ Elts.push_back(C);
+
+ if (BlockHasCopyDispose) {
+ // copy_func_helper_decl
+ Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+
+ // destroy_func_decl
+ Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+ }
+
+ C = llvm::ConstantStruct::get(Elts);
+
+ C = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "__block_descriptor_tmp", &CGM.getModule());
+ return C;
+}
+
+llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock == 0)
+ NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteGlobalBlock");
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *BlockModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock == 0)
+ NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ "_NSConcreteStackBlock");
+ return NSConcreteStackBlock;
+}
+
+static void CollectBlockDeclRefInfo(const Stmt *S,
+ CodeGenFunction::BlockInfo &Info) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (*I)
+ CollectBlockDeclRefInfo(*I, Info);
+
+ if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) {
+ // FIXME: Handle enums.
+ if (isa<FunctionDecl>(DE->getDecl()))
+ return;
+
+ if (DE->isByRef())
+ Info.ByRefDeclRefs.push_back(DE);
+ else
+ Info.ByCopyDeclRefs.push_back(DE);
+ }
+}
+
+/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
+/// declared as a global variable instead of on the stack.
+static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
+ return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty();
+}
+
+// FIXME: Push most into CGM, passing down a few bits, like current function
+// name.
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
+
+ std::string Name = CurFn->getName();
+ CodeGenFunction::BlockInfo Info(0, Name.c_str());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
+
+ // Check if the block can be global.
+ // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
+ // to just have one code path. We should move this function into CGM and pass
+ // CGF, then we can just check to see if CGF is 0.
+ if (0 && CanBlockBeGlobal(Info))
+ return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+
+ std::vector<llvm::Constant*> Elts(5);
+ llvm::Constant *C;
+ llvm::Value *V;
+
+ {
+ // C = BuildBlockStructInitlist();
+ unsigned int flags = BLOCK_HAS_DESCRIPTOR;
+
+ // We run this first so that we set BlockHasCopyDispose from the entire
+ // block literal.
+ // __invoke
+ uint64_t subBlockSize, subBlockAlign;
+ llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+ bool subBlockHasCopyDispose = false;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, LocalDeclMap,
+ subBlockSize,
+ subBlockAlign,
+ subBlockDeclRefDecls,
+ subBlockHasCopyDispose);
+ BlockHasCopyDispose |= subBlockHasCopyDispose;
+ Elts[3] = Fn;
+
+ // FIXME: Don't use BlockHasCopyDispose; it is set more often than
+ // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
+ if (subBlockHasCopyDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+
+ // __isa
+ C = CGM.getNSConcreteStackBlock();
+ C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
+ Elts[0] = C;
+
+ // __flags
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ C = llvm::ConstantInt::get(IntTy, flags);
+ Elts[1] = C;
+
+ // __reserved
+ C = llvm::ConstantInt::get(IntTy, 0);
+ Elts[2] = C;
+
+ if (subBlockDeclRefDecls.size() == 0) {
+ // __descriptor
+ Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0);
+
+ // Optimize to being a global block.
+ Elts[0] = CGM.getNSConcreteGlobalBlock();
+ Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
+
+ C = llvm::ConstantStruct::get(Elts);
+
+ char Name[32];
+ sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
+ C = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, Name, &CGM.getModule());
+ QualType BPT = BE->getType();
+ C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
+ return C;
+ }
+
+ std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size());
+ for (int i=0; i<4; ++i)
+ Types[i] = Elts[i]->getType();
+ Types[4] = PtrToInt8Ty;
+
+ for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
+ const Expr *E = subBlockDeclRefDecls[i];
+ const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ QualType Ty = E->getType();
+ if (BDRE && BDRE->isByRef()) {
+ uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl());
+ Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0);
+ } else
+ Types[i+5] = ConvertType(Ty);
+ }
+
+ llvm::StructType *Ty = llvm::StructType::get(Types, true);
+
+ llvm::AllocaInst *A = CreateTempAlloca(Ty);
+ A->setAlignment(subBlockAlign);
+ V = A;
+
+ std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
+ int helpersize = 0;
+
+ for (unsigned i=0; i<4; ++i)
+ Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+
+ for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
+ {
+ // FIXME: Push const down.
+ Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
+ DeclRefExpr *DR;
+ ValueDecl *VD;
+
+ DR = dyn_cast<DeclRefExpr>(E);
+ // Skip padding.
+ if (DR) continue;
+
+ BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ VD = BDRE->getDecl();
+
+ llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp");
+ NoteForHelper[helpersize].index = i+5;
+ NoteForHelper[helpersize].RequiresCopying = BlockRequiresCopying(VD->getType());
+ NoteForHelper[helpersize].flag
+ = VD->getType()->isBlockPointerType() ? BLOCK_FIELD_IS_BLOCK : BLOCK_FIELD_IS_OBJECT;
+
+ if (LocalDeclMap[VD]) {
+ if (BDRE->isByRef()) {
+ NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+ // FIXME: Someone double check this.
+ (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+ const llvm::Type *Ty = Types[i+5];
+ llvm::Value *Loc = LocalDeclMap[VD];
+ Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc, false);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Builder.CreateStore(Loc, Addr);
+ ++helpersize;
+ continue;
+ } else
+ E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD),
+ VD->getType(), SourceLocation(),
+ false, false);
+ }
+ if (BDRE->isByRef()) {
+ NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+ // FIXME: Someone double check this.
+ (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+ E = new (getContext())
+ UnaryOperator(E, UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ ++helpersize;
+
+ RValue r = EmitAnyExpr(E, Addr, false);
+ if (r.isScalar()) {
+ llvm::Value *Loc = r.getScalarVal();
+ const llvm::Type *Ty = Types[i+5];
+ if (BDRE->isByRef()) {
+ // E is now the address of the value field; instead, we want the
+ // address of the actual ByRef struct. We optimize this slightly
+ // compared to gcc by not grabbing the forwarding slot as this must
+ // be done during Block_copy for us, and we can postpone the work
+ // until then.
+ uint64_t offset = BlockDecls[BDRE->getDecl()];
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+
+ Loc = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ offset),
+ "block.literal");
+ Ty = llvm::PointerType::get(Ty, 0);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Loc = Builder.CreateLoad(Loc, false);
+ // Loc = Builder.CreateBitCast(Loc, Ty);
+ }
+ Builder.CreateStore(Loc, Addr);
+ } else if (r.isComplex())
+ // FIXME: implement
+ ErrorUnsupported(BE, "complex in block literal");
+ else if (r.isAggregate())
+ ; // Already created into the destination
+ else
+ assert (0 && "bad block variable");
+ // FIXME: Ensure that the offset created by the backend for
+ // the struct matches the previously computed offset in BlockDecls.
+ }
+ NoteForHelper.resize(helpersize);
+
+ // __descriptor
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
+ subBlockSize, Ty,
+ &NoteForHelper);
+ Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
+ Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
+ }
+
+ QualType BPT = BE->getType();
+ return Builder.CreateBitCast(V, ConvertType(BPT));
+}
+
+
+const llvm::Type *BlockModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ const llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ // };
+ BlockDescriptorType = llvm::StructType::get(UnsignedLongTy,
+ UnsignedLongTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_descriptor",
+ BlockDescriptorType);
+
+ return BlockDescriptorType;
+}
+
+const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_generic",
+ GenericBlockLiteralType);
+
+ return GenericBlockLiteralType;
+}
+
+const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
+ if (GenericExtendedBlockLiteralType)
+ return GenericExtendedBlockLiteralType;
+
+ const llvm::Type *BlockDescPtrTy =
+ llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // void *__copy_func_helper_decl;
+ // void *__destroy_func_decl;
+ // };
+ GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ IntTy,
+ IntTy,
+ PtrToInt8Ty,
+ BlockDescPtrTy,
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ NULL);
+
+ getModule().addTypeName("struct.__block_literal_extended_generic",
+ GenericExtendedBlockLiteralType);
+
+ return GenericExtendedBlockLiteralType;
+}
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAsBlockPointerType();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ const llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+
+ BlockLiteral =
+ Builder.CreateBitCast(BlockLiteral,
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty),
+ "tmp");
+
+ // Add the block literal.
+ QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAsFunctionProtoType(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp");
+
+ QualType ResultType = FnType->getAsFunctionType()->getResultType();
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, Args);
+
+ // Cast the function pointer to the right type.
+ const llvm::Type *BlockFTy =
+ CGM.getTypes().GetFunctionType(FnInfo, false);
+
+ const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, Args);
+}
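+
+// Illustrative sketch, not part of the original file: for a hypothetical
+// source-level call such as
+//
+//   int (^add)(int, int) = ...;
+//   int r = add(1, 2);
+//
+// the code above bitcasts the callee to the generic block literal type, loads
+// the __invoke pointer from field 3, and calls it with the block literal
+// itself prepended as the first (void *) argument.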
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+ uint64_t &offset = BlockDecls[E->getDecl()];
+
+ const llvm::Type *Ty;
+ Ty = CGM.getTypes().ConvertType(E->getDecl()->getType());
+
+ // See if we have already allocated an offset for this variable.
+ if (offset == 0) {
+ // Don't run the expensive check, unless we have to.
+ if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType()))
+ BlockHasCopyDispose = true;
+ // if not, allocate one now.
+ offset = getBlockOffset(E);
+ }
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ offset),
+ "block.literal");
+ if (E->isByRef()) {
+ bool needsCopyDispose = BlockRequiresCopying(E->getType());
+ uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl());
+ const llvm::Type *PtrStructTy
+ = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0);
+ // The block literal will need a copy/destroy helper.
+ BlockHasCopyDispose = true;
+ Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ } else {
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateBitCast(V, Ty);
+ }
+ return V;
+}
+
+void CodeGenFunction::BlockForwardSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
+ llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
+ if (DMEntry)
+ return;
+ // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
+ BlockDeclRefExpr *BDRE = new (getContext())
+ BlockDeclRefExpr(SelfDecl,
+ SelfDecl->getType(), SourceLocation(), false);
+ DMEntry = GetAddrOfBlockDecl(BDRE);
+}
+
+llvm::Constant *
+BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
+ // Generate the block descriptor.
+ const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+ getTypes().ConvertType(getContext().IntTy));
+
+ llvm::Constant *DescriptorFields[2];
+
+ // Reserved
+ DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
+
+ // Block literal size. For global blocks we just use the size of the generic
+ // block literal struct.
+ uint64_t BlockLiteralSize =
+ TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
+ DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
+
+ llvm::Constant *DescriptorStruct =
+ llvm::ConstantStruct::get(&DescriptorFields[0], 2);
+
+ llvm::GlobalVariable *Descriptor =
+ new llvm::GlobalVariable(DescriptorStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ DescriptorStruct, "__block_descriptor_global",
+ &getModule());
+
+ // Generate the constants for the block literal.
+ llvm::Constant *LiteralFields[5];
+
+ CodeGenFunction::BlockInfo Info(0, n);
+ uint64_t subBlockSize, subBlockAlign;
+ llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+ bool subBlockHasCopyDispose = false;
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ llvm::Function *Fn
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
+ subBlockSize,
+ subBlockAlign,
+ subBlockDeclRefDecls,
+ subBlockHasCopyDispose);
+ assert(subBlockSize == BlockLiteralSize
+ && "no imports allowed for global block");
+
+ // isa
+ LiteralFields[0] = getNSConcreteGlobalBlock();
+
+ // Flags
+ LiteralFields[1] =
+ llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
+
+ // Reserved
+ LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+
+ // Function
+ LiteralFields[3] = Fn;
+
+ // Descriptor
+ LiteralFields[4] = Descriptor;
+
+ llvm::Constant *BlockLiteralStruct =
+ llvm::ConstantStruct::get(&LiteralFields[0], 5);
+
+ llvm::GlobalVariable *BlockLiteral =
+ new llvm::GlobalVariable(BlockLiteralStruct->getType(), true,
+ llvm::GlobalVariable::InternalLinkage,
+ BlockLiteralStruct, "__block_literal_global",
+ &getModule());
+
+ return BlockLiteral;
+}
+
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+ return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self");
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+ const BlockInfo& Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+ uint64_t &Size,
+ uint64_t &Align,
+ llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+ bool &subBlockHasCopyDispose) {
+
+ // Check if we should generate debug info for this block.
+ if (CGM.getDebugInfo())
+ DebugInfo = CGM.getDebugInfo();
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, as they are directly referenced
+ // in a block.
+ for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
+ i != ldm.end();
+ ++i) {
+ const VarDecl *VD = dyn_cast<VarDecl>(i->first);
+
+ if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
+ LocalDeclMap[VD] = i->second;
+ }
+
+ // FIXME: We need to rearrange the code for copy/dispose so we have this
+ // sooner, so we can calculate offsets correctly.
+ if (!BlockHasCopyDispose)
+ BlockOffset = CGM.getTargetData()
+ .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
+ else
+ BlockOffset = CGM.getTargetData()
+ .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
+ BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+
+ const FunctionType *BlockFunctionType = BExpr->getFunctionType();
+ QualType ResultType;
+ bool IsVariadic;
+ if (const FunctionProtoType *FTy =
+ dyn_cast<FunctionProtoType>(BlockFunctionType)) {
+ ResultType = FTy->getResultType();
+ IsVariadic = FTy->isVariadic();
+ }
+ else {
+ // K&R style block.
+ ResultType = BlockFunctionType->getResultType();
+ IsVariadic = false;
+ }
+
+ FunctionArgList Args;
+
+ const BlockDecl *BD = BExpr->getBlockDecl();
+
+ // FIXME: This leaks
+ ImplicitParamDecl *SelfDecl =
+ ImplicitParamDecl::Create(getContext(), 0,
+ SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+ BlockStructDecl = SelfDecl;
+
+ for (BlockDecl::param_const_iterator i = BD->param_begin(),
+ e = BD->param_end(); i != e; ++i)
+ Args.push_back(std::make_pair(*i, (*i)->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(ResultType, Args);
+
+ std::string Name = std::string("__") + Info.Name + "_block_invoke_";
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+
+ StartFunction(BD, ResultType, Fn, Args,
+ BExpr->getBody()->getLocEnd());
+ CurFuncDecl = OuterFuncDecl;
+ CurCodeDecl = BD;
+ EmitStmt(BExpr->getBody());
+ FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+
+ // The runtime needs a minimum alignment of a void *.
+ uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+ BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);
+
+ Size = BlockOffset;
+ Align = BlockAlign;
+ subBlockDeclRefDecls = BlockDeclRefDecls;
+ subBlockHasCopyDispose |= BlockHasCopyDispose;
+ return Fn;
+}
+
+uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
+ const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
+
+ uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
+ uint64_t Align = getContext().getDeclAlignInBytes(D);
+
+ if (BDRE->isByRef()) {
+ Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
+ Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
+ }
+
+ assert ((Align > 0) && "alignment must be 1 byte or more");
+
+ uint64_t OldOffset = BlockOffset;
+
+ // Ensure proper alignment, even if it means we have to have a gap
+ BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
+ BlockAlign = std::max(Align, BlockAlign);
+
+ uint64_t Pad = BlockOffset - OldOffset;
+ if (Pad) {
+ llvm::ArrayType::get(llvm::Type::Int8Ty, Pad);
+ QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
+ llvm::APInt(32, Pad),
+ ArrayType::Normal, 0);
+ ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
+ 0, QualType(PadTy), VarDecl::None,
+ SourceLocation());
+ Expr *E;
+ E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+ SourceLocation(), false, false);
+ BlockDeclRefDecls.push_back(E);
+ }
+ BlockDeclRefDecls.push_back(BDRE);
+
+ BlockOffset += Size;
+ return BlockOffset-Size;
+}
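+
+// Worked example, not part of the original file: with BlockOffset currently
+// at 33 bytes, capturing an 8-byte, 8-byte-aligned double by copy rounds
+// BlockOffset up to 40, records a 7-byte char-array pad decl, appends the
+// BlockDeclRefExpr, advances BlockOffset to 48, and returns 40 as the
+// field's offset.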
+
+llvm::Constant *BlockFunction::
+GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__copy_helper_block_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
+ DstObj = Builder.CreateLoad(DstObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
+ Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
+
+ llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, Dstv, Srcv, N);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::
+GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
+ const llvm::StructType* T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__destroy_helper_block_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ if (NoteForHelperp) {
+ std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+ llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+ llvm::Type *PtrPtrT;
+ PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+ SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+ SrcObj = Builder.CreateLoad(SrcObj);
+
+ for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+ int flag = NoteForHelper[i].flag;
+ int index = NoteForHelper[i].index;
+
+ if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+ || NoteForHelper[i].RequiresCopying) {
+ llvm::Value *Srcv = SrcObj;
+ Srcv = Builder.CreateStructGEP(Srcv, index);
+ Srcv = Builder.CreateBitCast(Srcv,
+ llvm::PointerType::get(PtrToInt8Ty, 0));
+ Srcv = Builder.CreateLoad(Srcv);
+
+ BuildBlockRelease(Srcv, flag);
+ }
+ }
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelper) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelper);
+}
+
+llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
+ std::vector<HelperInfo> *NoteForHelperp) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
+ T, NoteForHelperp);
+}
+
+llvm::Constant *BlockFunction::
+GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Dst =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Dst, Dst->getType()));
+
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__Block_byref_id_object_copy_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ // dst->x
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
+
+ // src->x
+ V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateBitCast(V, T);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ llvm::Value *SrcObj = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+
+ llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *F = getBlockObjectAssign();
+ Builder.CreateCall3(F, DstObj, SrcObj, N);
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
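+
+// For orientation: the struct GEP at index 6 ("x") above assumes the usual
+// __Block_byref layout emitted for a __block variable that needs copy/dispose
+// helpers. A sketch (field names are illustrative, not defined in this file):
+//
+//   struct __Block_byref_x {
+//     void *isa;                              // 0
+//     struct __Block_byref_x *forwarding;     // 1
+//     int flags;                              // 2
+//     int size;                               // 3
+//     void (*byref_keep)(void *, void *);     // 4
+//     void (*byref_destroy)(void *);          // 5
+//     void *x;  /* the captured object */     // 6
+//   };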
+
+llvm::Constant *
+BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
+ int flag) {
+ QualType R = getContext().VoidTy;
+
+ FunctionArgList Args;
+ // FIXME: This leaks
+ ImplicitParamDecl *Src =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ getContext().getPointerType(getContext().VoidTy));
+
+ Args.push_back(std::make_pair(Src, Src->getType()));
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().getFunctionInfo(R, Args);
+
+ std::string Name = std::string("__Block_byref_id_object_dispose_");
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R,
+ FunctionDecl::Static, false,
+ true);
+ CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+ llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
+ V = Builder.CreateLoad(V);
+ V = Builder.CreateStructGEP(V, 6, "x");
+ V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
+ V = Builder.CreateLoad(V);
+
+ flag |= BLOCK_BYREF_CALLER;
+ BuildBlockRelease(V, flag);
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
+ int flag) {
+ return CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
+ int flag) {
+ return CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
+}
+
+llvm::Value *BlockFunction::getBlockObjectDispose() {
+ if (CGM.BlockObjectDispose == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::VoidTy;
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::Int32Ty);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectDispose
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
+ }
+ return CGM.BlockObjectDispose;
+}
+
+llvm::Value *BlockFunction::getBlockObjectAssign() {
+ if (CGM.BlockObjectAssign == 0) {
+ const llvm::FunctionType *FTy;
+ std::vector<const llvm::Type*> ArgTys;
+ const llvm::Type *ResultType = llvm::Type::VoidTy;
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(PtrToInt8Ty);
+ ArgTys.push_back(llvm::Type::Int32Ty);
+ FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+ CGM.BlockObjectAssign
+ = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
+ }
+ return CGM.BlockObjectAssign;
+}
+
+void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+ llvm::Value *F = getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, PtrToInt8Ty);
+ N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ Builder.CreateCall2(F, V, N);
+}
+
+ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
+
+BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
+ CGBuilderTy &B)
+ : CGM(cgm), CGF(cgf), Builder(B) {
+ PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ BlockHasCopyDispose = false;
+}
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..56d3a2d
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,223 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for LLVM translation of block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include <vector>
+#include <map>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class Value;
+}
+
+namespace clang {
+
+namespace CodeGen {
+class CodeGenModule;
+
+class BlockBase {
+public:
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+};
+
+class BlockModule : public BlockBase {
+ ASTContext &Context;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ CodeGenTypes &Types;
+ CodeGenModule &CGM;
+
+ ASTContext &getContext() const { return Context; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+public:
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
+ const llvm::Type *getBlockDescriptorType();
+
+ const llvm::Type *getGenericBlockLiteralType();
+ const llvm::Type *getGenericExtendedBlockLiteralType();
+
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// NSConcreteGlobalBlock - Cached reference to the class pointer for global
+ /// blocks.
+ llvm::Constant *NSConcreteGlobalBlock;
+
+ /// NSConcreteStackBlock - Cached reference to the class pointer for stack
+ /// blocks.
+ llvm::Constant *NSConcreteStackBlock;
+
+ const llvm::Type *BlockDescriptorType;
+ const llvm::Type *GenericBlockLiteralType;
+ const llvm::Type *GenericExtendedBlockLiteralType;
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ llvm::Value *BlockObjectAssign;
+ llvm::Value *BlockObjectDispose;
+ const llvm::Type *PtrToInt8Ty;
+
+ BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
+ CodeGenTypes &T, CodeGenModule &CodeGen)
+ : Context(C), TheModule(M), TheTargetData(TD), Types(T),
+ CGM(CodeGen),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
+ GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
+ BlockObjectAssign(0), BlockObjectDispose(0) {
+ Block.GlobalUniqueCount = 0;
+ PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ }
+};
+
+class BlockFunction : public BlockBase {
+ CodeGenModule &CGM;
+ CodeGenFunction &CGF;
+ ASTContext &getContext() const;
+
+public:
+ const llvm::Type *PtrToInt8Ty;
+ struct HelperInfo {
+ int index;
+ int flag;
+ bool RequiresCopying;
+ };
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose
+ support routines */
+ };
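+
+ // How these combine in practice (values shown for orientation only): a
+ // __block variable captured by a block literal is recorded with
+ // BLOCK_FIELD_IS_BYREF (8) for the block's own copy/dispose helpers, while
+ // the byref object's helpers OR in BLOCK_BYREF_CALLER, e.g.
+ // BLOCK_FIELD_IS_OBJECT | BLOCK_BYREF_CALLER == 3 | 128 == 131.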
+
+ /// BlockInfo - Information to generate a block literal.
+ struct BlockInfo {
+ /// BlockLiteralTy - The type of the block literal.
+ const llvm::Type *BlockLiteralTy;
+
+ /// Name - the name of the function this block was created for, if any.
+ const char *Name;
+
+ /// ByCopyDeclRefs - Variables from parent scopes that have been imported
+ /// into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> ByCopyDeclRefs;
+
+ /// ByRefDeclRefs - __block variables from parent scopes that have been
+ /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> ByRefDeclRefs;
+
+ BlockInfo(const llvm::Type *blt, const char *n)
+ : BlockLiteralTy(blt), Name(n) {
+ // Skip asm prefix, if any.
+ if (Name && Name[0] == '\01')
+ ++Name;
+ }
+ };
+
+ CGBuilderTy &Builder;
+
+ BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
+
+ /// BlockOffset - The offset in bytes for the next allocation of an
+ /// imported block variable.
+ uint64_t BlockOffset;
+ /// BlockAlign - Maximal alignment needed for the Block expressed in bytes.
+ uint64_t BlockAlign;
+
+ /// getBlockOffset - Allocate an offset for the ValueDecl from a
+ /// BlockDeclRefExpr in a block literal (BlockExpr).
+ uint64_t getBlockOffset(const BlockDeclRefExpr *E);
+
+ /// BlockHasCopyDispose - True iff the block uses copy/dispose.
+ bool BlockHasCopyDispose;
+
+ /// BlockDeclRefDecls - Decls from BlockDeclRefExprs in appearance order
+ /// in a block literal. Decls without names are used for padding.
+ llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls;
+
+ /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
+ std::map<const Decl*, uint64_t> BlockDecls;
+
+ ImplicitParamDecl *BlockStructDecl;
+ ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
+
+ llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *BuildCopyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+ llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
+ llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
+
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag);
+
+ llvm::Value *getBlockObjectAssign();
+ llvm::Value *getBlockObjectDispose();
+ void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
+
+ bool BlockRequiresCopying(QualType Ty) {
+ if (Ty->isBlockPointerType())
+ return true;
+ if (getContext().isObjCNSObjectType(Ty))
+ return true;
+ if (getContext().isObjCObjectPointerType(Ty))
+ return true;
+ return false;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..ed56bd9
--- /dev/null
+++ b/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,26 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+ // Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+ typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+ typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..d813bba
--- /dev/null
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,1037 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
+ Intrinsic::ID Id, const CallExpr *E) {
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ return RValue::get(CGF.Builder.CreateCall2(AtomF,
+ CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1))));
+}
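+
+// Illustration of the mapping above (pseudo-IR; the real intrinsic name is
+// overloaded on the operand type): a call such as
+//   __sync_fetch_and_add_4(p, v)
+// becomes roughly
+//   %old = call i32 @llvm.atomic.load.add(i32* %p, i32 %v)
+// and %old, the value before the operation, is returned to the caller.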
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+ Intrinsic::ID Id, const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ const llvm::Type *ResType[2];
+ ResType[0] = CGF.ConvertType(E->getType());
+ ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
+ Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
+
+ if (Id == Intrinsic::atomic_load_nand)
+ Result = CGF.Builder.CreateNot(Result);
+
+ return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
+}
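+
+// The "post" form recovers the op-and-fetch result from the intrinsic's old
+// value. Roughly (pseudo-IR), __sync_add_and_fetch(p, 4) becomes
+//   %old = call i32 @llvm.atomic.load.add(i32* %p, i32 4)
+//   %res = add i32 %old, 4
+// For the nand flavor the loaded value is negated first, so the final BinOp
+// above is applied to the negated old value.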
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->Evaluate(Result, CGM.getContext())) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(Result.Val.getInt()));
+ else if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getNameStart());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ const llvm::Type *Type =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue, Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
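+ // e.g. ffs(8): cttz(8) = 3, 3 + 1 = 4; ffs(0) is forced to 0 by the
+ // select on IsZero below.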
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ ConstantInt::get(ArgType, 1), "tmp");
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
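+ // e.g. parity(5): ctpop(5) = 2, 2 & 1 = 0; parity(7): ctpop(7) = 3,
+ // 3 & 1 = 1.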
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
+ Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1),
+ "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect:
+ // FIXME: pass expect through to LLVM
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ }
+ case Builtin::BI__builtin_object_size: {
+ // FIXME: Implement. For now we just always fail and pretend we
+ // don't know the object size.
+ llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
+ const llvm::Type *ResType = ConvertType(E->getType());
+ // bool UseSubObject = TypeArg.getZExtValue() & 1;
+ bool UseMinimum = TypeArg.getZExtValue() & 2;
+ return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
+ }
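+ // (With the stub above, __builtin_object_size(p, 0) and (p, 1) fold to -1
+ // and (p, 2) / (p, 3) fold to 0, the usual "size unknown" fallbacks.)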
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+ // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ ConstantInt::get(llvm::Type::Int32Ty, 3);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
+ return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+ "tmp"));
+ }
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: LLVM IR should allow alloca with an i64 size!
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp");
+ return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp"));
+ }
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemSetFn(), Address,
+ llvm::ConstantInt::get(llvm::Type::Int8Ty, 0),
+ EmitScalarExpr(E->getArg(1)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemCpyFn(), Address,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemMoveFn(), Address,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall4(CGM.getMemSetFn(), Address,
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ llvm::Type::Int8Ty),
+ EmitScalarExpr(E->getArg(2)),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ // FIXME: There should be a target hook for this
+ return RValue::get(EmitScalarExpr(E->getArg(0)));
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+ return RValue::get(Builder.CreateCall(F));
+ }
+#if 0
+ // FIXME: Finish/enable when LLVM backend support stabilizes
+ case Builtin::BI__builtin_setjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ // Store the frame pointer to the buffer
+ Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+ Value *FrameAddr =
+ Builder.CreateCall(FrameAddrF,
+ Constant::getNullValue(llvm::Type::Int32Ty));
+ Builder.CreateStore(FrameAddr, Buf);
+ // Call the setjmp intrinsic
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *DestType =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Buf = Builder.CreateBitCast(Buf, DestType);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+#endif
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ assert(0 && "Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ llvm::Instruction::Xor);
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
+ llvm::Instruction::And);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ {
+ const llvm::Type *ResType[2];
+ ResType[0] = ConvertType(E->getType());
+ ResType[1] = ConvertType(E->getArg(0)->getType());
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ return RValue::get(Builder.CreateCall3(AtomF,
+ EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2))));
+ }
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ {
+ const llvm::Type *ResType[2];
+ ResType[0] = ConvertType(E->getArg(1)->getType());
+ ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
+ Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+ Value *OldVal = EmitScalarExpr(E->getArg(1));
+ Value *PrevVal = Builder.CreateCall3(AtomF,
+ EmitScalarExpr(E->getArg(0)),
+ OldVal,
+ EmitScalarExpr(E->getArg(2)));
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ElTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__sync_synchronize: {
+ Value *C[5];
+ C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1);
+ C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+ return RValue::get(0);
+ }
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // Rewrite sqrt to intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ const llvm::Type *ArgType = Arg0->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
+ return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+ // Rewrite pow to intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ const llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ }
+ }
+
+ // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
+ // that function.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
+ getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
+ E->getCallee()->getType(), E->arg_begin(),
+ E->arg_end());
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID =
+ Intrinsic::getIntrinsicForGCCBuiltin(Target.getTargetPrefix(), Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ const llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue = EmitScalarExpr(E->getArg(i));
+
+ // If the intrinsic arg type is different from the builtin arg type
+ // we need to do a bit cast.
+ const llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+ assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+ QualType BuiltinRetType = E->getType();
+
+ const llvm::Type *RetTy = llvm::Type::VoidTy;
+ if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+ // Unknown builtin, for now just dump it out and return undef.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
+ return RValue::get(UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ const char *TargetPrefix = Target.getTargetPrefix();
+ if (strcmp(TargetPrefix, "x86") == 0)
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ else if (strcmp(TargetPrefix, "ppc") == 0)
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ return 0;
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_mulps:
+ return Builder.CreateMul(Ops[0], Ops[1], "mulps");
+ case X86::BI__builtin_ia32_mulpd:
+ return Builder.CreateMul(Ops[0], Ops[1], "mulpd");
+ case X86::BI__builtin_ia32_pand:
+ case X86::BI__builtin_ia32_pand128:
+ return Builder.CreateAnd(Ops[0], Ops[1], "pand");
+ case X86::BI__builtin_ia32_por:
+ case X86::BI__builtin_ia32_por128:
+ return Builder.CreateOr(Ops[0], Ops[1], "por");
+ case X86::BI__builtin_ia32_pxor:
+ case X86::BI__builtin_ia32_pxor128:
+ return Builder.CreateXor(Ops[0], Ops[1], "pxor");
+ case X86::BI__builtin_ia32_pandn:
+ case X86::BI__builtin_ia32_pandn128:
+ Ops[0] = Builder.CreateNot(Ops[0], "tmp");
+ return Builder.CreateAnd(Ops[0], Ops[1], "pandn");
+ case X86::BI__builtin_ia32_paddb:
+ case X86::BI__builtin_ia32_paddb128:
+ case X86::BI__builtin_ia32_paddd:
+ case X86::BI__builtin_ia32_paddd128:
+ case X86::BI__builtin_ia32_paddq:
+ case X86::BI__builtin_ia32_paddq128:
+ case X86::BI__builtin_ia32_paddw:
+ case X86::BI__builtin_ia32_paddw128:
+ case X86::BI__builtin_ia32_addps:
+ case X86::BI__builtin_ia32_addpd:
+ return Builder.CreateAdd(Ops[0], Ops[1], "add");
+ case X86::BI__builtin_ia32_psubb:
+ case X86::BI__builtin_ia32_psubb128:
+ case X86::BI__builtin_ia32_psubd:
+ case X86::BI__builtin_ia32_psubd128:
+ case X86::BI__builtin_ia32_psubq:
+ case X86::BI__builtin_ia32_psubq128:
+ case X86::BI__builtin_ia32_psubw:
+ case X86::BI__builtin_ia32_psubw128:
+ case X86::BI__builtin_ia32_subps:
+ case X86::BI__builtin_ia32_subpd:
+ return Builder.CreateSub(Ops[0], Ops[1], "sub");
+ case X86::BI__builtin_ia32_divps:
+ return Builder.CreateFDiv(Ops[0], Ops[1], "divps");
+ case X86::BI__builtin_ia32_divpd:
+ return Builder.CreateFDiv(Ops[0], Ops[1], "divpd");
+ case X86::BI__builtin_ia32_pmullw:
+ case X86::BI__builtin_ia32_pmullw128:
+ return Builder.CreateMul(Ops[0], Ops[1], "pmul");
+ case X86::BI__builtin_ia32_punpckhbw:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15,
+ "punpckhbw");
+ case X86::BI__builtin_ia32_punpckhbw128:
+ return EmitShuffleVector(Ops[0], Ops[1], 8, 24, 9, 25, 10, 26, 11, 27,
+ 12, 28, 13, 29, 14, 30, 15, 31,
+ "punpckhbw");
+ case X86::BI__builtin_ia32_punpckhwd:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhwd");
+ case X86::BI__builtin_ia32_punpckhwd128:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15,
+ "punpckhwd");
+ case X86::BI__builtin_ia32_punpckhdq:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhdq");
+ case X86::BI__builtin_ia32_punpckhdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhdq");
+ case X86::BI__builtin_ia32_punpckhqdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhqdq");
+ case X86::BI__builtin_ia32_punpcklbw:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11,
+ "punpcklbw");
+ case X86::BI__builtin_ia32_punpcklwd:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpcklwd");
+ case X86::BI__builtin_ia32_punpckldq:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpckldq");
+ case X86::BI__builtin_ia32_punpckldq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpckldq");
+ case X86::BI__builtin_ia32_punpcklqdq128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpcklqdq");
+ case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_psllqi128:
+ case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psradi128:
+ case X86::BI__builtin_ia32_psrawi128:
+ case X86::BI__builtin_ia32_psrldi128:
+ case X86::BI__builtin_ia32_psrlqi128:
+ case X86::BI__builtin_ia32_psrlwi128: {
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
+ Ops[1], Zero, "insert");
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi128:
+ name = "pslldi";
+ ID = Intrinsic::x86_sse2_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi128:
+ name = "psllqi";
+ ID = Intrinsic::x86_sse2_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi128:
+ name = "psllwi";
+ ID = Intrinsic::x86_sse2_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi128:
+ name = "psradi";
+ ID = Intrinsic::x86_sse2_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi128:
+ name = "psrawi";
+ ID = Intrinsic::x86_sse2_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi128:
+ name = "psrldi";
+ ID = Intrinsic::x86_sse2_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi128:
+ name = "psrlqi";
+ ID = Intrinsic::x86_sse2_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi128:
+ name = "psrlwi";
+ ID = Intrinsic::x86_sse2_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_psllqi:
+ case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psradi:
+ case X86::BI__builtin_ia32_psrawi:
+ case X86::BI__builtin_ia32_psrldi:
+ case X86::BI__builtin_ia32_psrlqi:
+ case X86::BI__builtin_ia32_psrlwi: {
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: assert(0 && "Unsupported shift intrinsic!");
+ case X86::BI__builtin_ia32_pslldi:
+ name = "pslldi";
+ ID = Intrinsic::x86_mmx_psll_d;
+ break;
+ case X86::BI__builtin_ia32_psllqi:
+ name = "psllqi";
+ ID = Intrinsic::x86_mmx_psll_q;
+ break;
+ case X86::BI__builtin_ia32_psllwi:
+ name = "psllwi";
+ ID = Intrinsic::x86_mmx_psll_w;
+ break;
+ case X86::BI__builtin_ia32_psradi:
+ name = "psradi";
+ ID = Intrinsic::x86_mmx_psra_d;
+ break;
+ case X86::BI__builtin_ia32_psrawi:
+ name = "psrawi";
+ ID = Intrinsic::x86_mmx_psra_w;
+ break;
+ case X86::BI__builtin_ia32_psrldi:
+ name = "psrldi";
+ ID = Intrinsic::x86_mmx_psrl_d;
+ break;
+ case X86::BI__builtin_ia32_psrlqi:
+ name = "psrlqi";
+ ID = Intrinsic::x86_mmx_psrl_q;
+ break;
+ case X86::BI__builtin_ia32_psrlwi:
+ name = "psrlwi";
+ ID = Intrinsic::x86_mmx_psrl_w;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ }
+ case X86::BI__builtin_ia32_pshufw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6,
+ "pshufw");
+ }
+ case X86::BI__builtin_ia32_pshuflw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6, 4, 5, 6, 7,
+ "pshuflw");
+ }
+ case X86::BI__builtin_ia32_pshufhw: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0], 0, 1, 2, 3,
+ 4 + (i & 0x3), 4 + ((i & 0xc) >> 2),
+ 4 + ((i & 0x30) >> 4), 4 + ((i & 0xc0) >> 6),
+ "pshufhw");
+ }
+ case X86::BI__builtin_ia32_pshufd: {
+ unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[0],
+ i & 0x3, (i & 0xc) >> 2,
+ (i & 0x30) >> 4, (i & 0xc0) >> 6,
+ "pshufd");
+ }
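+ // (Mask decoding example for the pshuf* cases above: an immediate of 0x1B,
+ // i.e. 0b00011011, selects elements 3, 2, 1, 0 and so reverses the four
+ // lanes.)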
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return EmitVector(&Ops[0], Ops.size());
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ case X86::BI__builtin_ia32_vec_ext_v2di:
+ case X86::BI__builtin_ia32_vec_ext_v4sf:
+ case X86::BI__builtin_ia32_vec_ext_v4si:
+ case X86::BI__builtin_ia32_vec_ext_v8hi:
+ case X86::BI__builtin_ia32_vec_ext_v4hi:
+ case X86::BI__builtin_ia32_vec_ext_v2df:
+ return Builder.CreateExtractElement(Ops[0], Ops[1], "result");
+ case X86::BI__builtin_ia32_cmpps: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+ }
+ case X86::BI__builtin_ia32_cmpss: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+ }
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_cmppd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+ }
+ case X86::BI__builtin_ia32_cmpsd: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+ }
+ case X86::BI__builtin_ia32_movss:
+ return EmitShuffleVector(Ops[0], Ops[1], 4, 1, 2, 3, "movss");
+ case X86::BI__builtin_ia32_shufps: {
+ unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[1],
+ i & 0x3, (i & 0xc) >> 2,
+ ((i & 0x30) >> 4) + 4,
+ ((i & 0xc0) >> 6) + 4, "shufps");
+ }
+ case X86::BI__builtin_ia32_shufpd: {
+ unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue();
+ return EmitShuffleVector(Ops[0], Ops[1], i & 1,
+ ((i & 2) >> 1)+2, "shufpd");
+ }
+ case X86::BI__builtin_ia32_punpcklbw128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 16, 1, 17, 2, 18, 3, 19,
+ 4, 20, 5, 21, 6, 22, 7, 23,
+ "punpcklbw");
+ case X86::BI__builtin_ia32_punpcklwd128:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11,
+ "punpcklwd");
+ case X86::BI__builtin_ia32_movlhps:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 1, 4, 5, "movlhps");
+ case X86::BI__builtin_ia32_movhlps:
+ return EmitShuffleVector(Ops[0], Ops[1], 6, 7, 2, 3, "movhlps");
+ case X86::BI__builtin_ia32_unpckhps:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "unpckhps");
+ case X86::BI__builtin_ia32_unpcklps:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "unpcklps");
+ case X86::BI__builtin_ia32_unpckhpd:
+ return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "unpckhpd");
+ case X86::BI__builtin_ia32_unpcklpd:
+ return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "unpcklpd");
+ case X86::BI__builtin_ia32_movsd:
+ return EmitShuffleVector(Ops[0], Ops[1], 2, 1, "movsd");
+ case X86::BI__builtin_ia32_loadlps:
+ case X86::BI__builtin_ia32_loadhps: {
+ // FIXME: This should probably be represented as
+ // shuffle (dst, (v4f32 (insert undef, (load i64), 0)), shuf mask hi/lo)
+ const llvm::Type *EltTy = llvm::Type::DoubleTy;
+ const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ const llvm::Type *OrigTy = Ops[0]->getType();
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ Ops[1] = Builder.CreateBitCast(Ops[1], llvm::PointerType::getUnqual(EltTy));
+ Ops[1] = Builder.CreateLoad(Ops[1], "tmp");
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[0] = Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadps");
+ return Builder.CreateBitCast(Ops[0], OrigTy, "loadps");
+ }
+ case X86::BI__builtin_ia32_loadlpd:
+ case X86::BI__builtin_ia32_loadhpd: {
+ Ops[1] = Builder.CreateLoad(Ops[1], "tmp");
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlpd ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadpd");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ const llvm::Type *EltTy = llvm::Type::Int64Ty;
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+ llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+
+ // cast val v2i64
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+ // extract (0, 1)
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_loadlv4si: {
+ // load i64
+ const llvm::Type *EltTy = llvm::Type::Int64Ty;
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ Ops[0] = Builder.CreateLoad(Ops[0], "load");
+
+ // scalar to vector: insert i64 into 2 x i64 undef
+ llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(llvm::UndefValue::get(VecTy),
+ Ops[0], Zero, "s2v");
+
+ // shuffle into zero vector.
+ std::vector<llvm::Constant*> Elts;
+ Elts.resize(2, llvm::ConstantInt::get(EltTy, 0));
+ llvm::Value *ZV = ConstantVector::get(Elts);
+ Ops[0] = EmitShuffleVector(ZV, Ops[0], 2, 1, "loadl");
+
+ // bitcast to result.
+ return Builder.CreateBitCast(Ops[0],
+ llvm::VectorType::get(llvm::Type::Int32Ty, 4));
+ }
+ case X86::BI__builtin_ia32_vec_set_v4hi:
+ case X86::BI__builtin_ia32_vec_set_v8hi:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrw");
+ case X86::BI__builtin_ia32_vec_set_v4si:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrd");
+ case X86::BI__builtin_ia32_vec_set_v2di:
+ return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrq");
+ case X86::BI__builtin_ia32_andps:
+ case X86::BI__builtin_ia32_andpd:
+ case X86::BI__builtin_ia32_andnps:
+ case X86::BI__builtin_ia32_andnpd:
+ case X86::BI__builtin_ia32_orps:
+ case X86::BI__builtin_ia32_orpd:
+ case X86::BI__builtin_ia32_xorpd:
+ case X86::BI__builtin_ia32_xorps: {
+ const llvm::Type *ITy = llvm::VectorType::get(llvm::Type::Int32Ty, 4);
+ const llvm::Type *FTy = Ops[0]->getType();
+ Ops[0] = Builder.CreateBitCast(Ops[0], ITy, "bitcast");
+ Ops[1] = Builder.CreateBitCast(Ops[1], ITy, "bitcast");
+ switch (BuiltinID) {
+ case X86::BI__builtin_ia32_andps:
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andps");
+ break;
+ case X86::BI__builtin_ia32_andpd:
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andpd");
+ break;
+ case X86::BI__builtin_ia32_andnps:
+ Ops[0] = Builder.CreateNot(Ops[0], "not");
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnps");
+ break;
+ case X86::BI__builtin_ia32_andnpd:
+ Ops[0] = Builder.CreateNot(Ops[0], "not");
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnpd");
+ break;
+ case X86::BI__builtin_ia32_orps:
+ Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orps");
+ break;
+ case X86::BI__builtin_ia32_orpd:
+ Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orpd");
+ break;
+ case X86::BI__builtin_ia32_xorps:
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorps");
+ break;
+ case X86::BI__builtin_ia32_xorpd:
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorpd");
+ break;
+ }
+ return Builder.CreateBitCast(Ops[0], FTy, "bitcast");
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ default: return 0;
+ }
+}
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..731e38c
--- /dev/null
+++ b/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,454 @@
+//===--- CGCXX.cpp - Emit LLVM Code for C++ code --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+void
+CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ // FIXME: This should use __cxa_guard_{acquire,release}?
+
+ assert(!getContext().getLangOptions().ThreadsafeStatics &&
+ "thread safe statics are currently not supported!");
+
+ llvm::SmallString<256> GuardVName;
+ llvm::raw_svector_ostream GuardVOut(GuardVName);
+ mangleGuardVariable(&D, getContext(), GuardVOut);
+
+ // Create the guard variable.
+ llvm::GlobalValue *GuardV =
+ new llvm::GlobalVariable(llvm::Type::Int64Ty, false,
+ GV->getLinkage(),
+ llvm::Constant::getNullValue(llvm::Type::Int64Ty),
+ GuardVName.c_str(),
+ &CGM.getModule());
+
+ // Load the first byte of the guard variable.
+ const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0);
+ llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
+ "tmp");
+
+ // Compare it against 0.
+ llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
+ llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool");
+
+ llvm::BasicBlock *InitBlock = createBasicBlock("init");
+ llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
+
+ // If the guard variable is 0, jump to the initializer code.
+ Builder.CreateCondBr(ICmp, InitBlock, EndBlock);
+
+ EmitBlock(InitBlock);
+
+ const Expr *Init = D.getInit();
+ if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ Builder.CreateStore(V, GV, D.getType().isVolatileQualified());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, GV, D.getType().isVolatileQualified());
+ } else {
+ EmitAggExpr(Init, GV, D.getType().isVolatileQualified());
+ }
+
+ Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1),
+ Builder.CreateBitCast(GuardV, PtrTy));
+
+ EmitBlock(EndBlock);
+}
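
A sketch of the source pattern the guard logic above handles, assuming non-thread-safe statics (hypothetical function names, not part of the patch): a function-local static with a dynamic initializer.

  int computeInitialValue();   // assumed to be defined elsewhere

  int &counter() {
    // The emitted code loads the mangled guard variable; only if it is still
    // zero does it run computeInitialValue(), store the result into the
    // static, and set the guard to 1 so later calls skip the initializer.
    static int value = computeInitialValue();
    return value;
  }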
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This),
+ MD->getThisType(getContext())));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ Callee, Args, MD);
+}
+
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) {
+ const MemberExpr *ME = cast<MemberExpr>(CE->getCallee());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+
+ llvm::Value *This;
+
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else {
+ LValue BaseLV = EmitLValue(ME->getBase());
+ This = BaseLV.getAddress();
+ }
+
+ return EmitCXXMemberCall(MD, Callee, This,
+ CE->arg_begin(), CE->arg_end());
+}
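
A small hypothetical example of what EmitCXXMemberCallExpr covers: the object expression supplies the implicit 'this' argument, which is pushed ahead of the written arguments.

  struct Point {
    int x, y;
    int scaled(int factor) const { return (x + y) * factor; }
  };

  int use(const Point *p, const Point &q) {
    // p->scaled(2): 'This' comes from EmitScalarExpr on the arrow base.
    // q.scaled(3):  'This' is the address of the dot base lvalue.
    return p->scaled(2) + q.scaled(3);
  }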
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+
+ const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+
+ return EmitCXXMemberCall(MD, Callee, This,
+ E->arg_begin() + 1, E->arg_end());
+}
+
+llvm::Value *CodeGenFunction::LoadCXXThis() {
+ assert(isa<CXXMethodDecl>(CurFuncDecl) &&
+ "Must be in a C++ member function decl to load 'this'");
+ assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
+ "Must be in a C++ member function decl to load 'this'");
+
+ // FIXME: What if we're inside a block?
+ // ans: See how CodeGenFunction::LoadObjCSelf() uses
+ // CodeGenFunction::BlockForwardSelf() for how to do this.
+ return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
+}
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, This, ArgBeg, ArgEnd);
+}
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ llvm::Value *This) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, This, 0, 0);
+}
+
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+ const CXXConstructExpr *E) {
+ assert(Dest && "Must have a destination!");
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(E->getType()->getAsRecordType()->getDecl());
+ if (RD->hasTrivialConstructor())
+ return;
+
+ // Call the constructor.
+ EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
+ E->arg_begin(), E->arg_end());
+}
+
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
+ LiveTemporaries.push_back(Temporary);
+
+ // Make a cleanup scope and emit the destructor.
+ {
+ CleanupScope Scope(*this);
+
+ EmitCXXDestructorCall(Temporary->getDestructor(), Dtor_Complete, Ptr);
+ }
+}
+
+RValue
+CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc,
+ bool isAggLocVolatile) {
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+
+ unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+ RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile);
+
+ // Go through the temporaries backwards.
+ for (unsigned i = E->getNumTemporaries(); i != 0; --i) {
+ assert(LiveTemporaries.back() == E->getTemporary(i - 1));
+ LiveTemporaries.pop_back();
+ }
+
+ assert(OldNumLiveTemporaries == LiveTemporaries.size() &&
+ "Live temporary stack mismatch!");
+
+ EmitCleanupBlocks(CleanupStackDepth);
+
+ return RV;
+}
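
A sketch of the construct handled above (hypothetical types): temporaries created inside a full expression are pushed while it is emitted and destroyed via the cleanup blocks when the full expression ends.

  struct Buffer {
    Buffer() {}
    ~Buffer() {}                      // queued on the cleanup stack
    const char *data() const { return ""; }
  };

  void sink(const char *) {}

  void emitOneLine() {
    // The Buffer temporary lives for this full expression only; its
    // destructor runs from the cleanup blocks right after the call.
    sink(Buffer().data());
  }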
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ if (E->isArray()) {
+ ErrorUnsupported(E, "new[] expression");
+ return llvm::UndefValue::get(ConvertType(E->getType()));
+ }
+
+ QualType AllocType = E->getAllocatedType();
+ FunctionDecl *NewFD = E->getOperatorNew();
+ const FunctionProtoType *NewFTy = NewFD->getType()->getAsFunctionProtoType();
+
+ CallArgList NewArgs;
+
+ // The allocation size is the first argument.
+ QualType SizeTy = getContext().getSizeType();
+ llvm::Value *AllocSize =
+ llvm::ConstantInt::get(ConvertType(SizeTy),
+ getContext().getTypeSize(AllocType) / 8);
+
+ NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+ QualType ArgType = NewFTy->getArgType(i);
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ NewArg != NewArgEnd; ++NewArg) {
+ QualType ArgType = NewArg->getType();
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+ }
+
+ // Emit the call to new.
+ RValue RV =
+ EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
+ CGM.GetAddrOfFunction(GlobalDecl(NewFD)),
+ NewArgs, NewFD);
+
+  // If an allocation function is declared with an empty exception specification,
+ // it returns null to indicate failure to allocate storage. [expr.new]p13.
+ // (We don't need to check for null when there's no new initializer and
+ // we're allocating a POD type).
+ bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+ !(AllocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *NewNull = 0;
+ llvm::BasicBlock *NewNotNull = 0;
+ llvm::BasicBlock *NewEnd = 0;
+
+ llvm::Value *NewPtr = RV.getScalarVal();
+
+ if (NullCheckResult) {
+ NewNull = createBasicBlock("new.null");
+ NewNotNull = createBasicBlock("new.notnull");
+ NewEnd = createBasicBlock("new.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(NewPtr,
+ llvm::Constant::getNullValue(NewPtr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+ EmitBlock(NewNotNull);
+ }
+
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+
+ if (AllocType->isPODType()) {
+ if (E->getNumConstructorArgs() > 0) {
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+
+ if (!hasAggregateLLVMType(AllocType))
+ Builder.CreateStore(EmitScalarExpr(Init), NewPtr);
+ else if (AllocType->isAnyComplexType())
+ EmitComplexExprIntoAddr(Init, NewPtr, AllocType.isVolatileQualified());
+ else
+ EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+ }
+ } else {
+ // Call the constructor.
+ CXXConstructorDecl *Ctor = E->getConstructor();
+
+ EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end());
+ }
+
+ if (NullCheckResult) {
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewNull);
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(NewPtr, NewNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+
+ NewPtr = PHI;
+ }
+
+ return NewPtr;
+}
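
A hypothetical example exercising the null-check path above: because this operator new has an empty exception specification, failure is signalled by returning null, so the constructor only runs on the not-null branch.

  #include <cstddef>

  struct Pool {
    void *allocate(std::size_t) { return 0; }   // always "fails" in this sketch
  };

  // Empty exception spec => hasEmptyExceptionSpec() is true in the code above.
  void *operator new(std::size_t size, Pool &pool) throw() {
    return pool.allocate(size);
  }

  struct Widget {
    int id;
    explicit Widget(int i) : id(i) {}
  };

  Widget *make(Pool &pool) {
    // Emitted roughly as: call operator new, compare the result against null,
    // run the Widget constructor only on the not-null block, then PHI the
    // not-null pointer with null.
    return new (pool) Widget(42);
  }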
+
+static bool canGenerateCXXstructor(const CXXRecordDecl *RD,
+ ASTContext &Context) {
+ // The class has base classes - we don't support that right now.
+ if (RD->getNumBases() > 0)
+ return false;
+
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(Context),
+ E = RD->field_end(Context); I != E; ++I) {
+ // We don't support ctors for fields that aren't POD.
+ if (!I->getType()->isPODType())
+ return false;
+ }
+
+ return true;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ if (!canGenerateCXXstructor(D->getParent(), getContext())) {
+ ErrorUnsupported(D, "C++ constructor", true);
+ return;
+ }
+
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+
+ llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::Function *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
+
+ const char *Name = getMangledCXXCtorName(D, Type);
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ mangleCXXCtor(D, Type, Context, Out);
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
+
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ if (!canGenerateCXXstructor(D->getParent(), getContext())) {
+ ErrorUnsupported(D, "C++ destructor", true);
+ return;
+ }
+
+ EmitCXXDestructor(D, Dtor_Complete);
+ EmitCXXDestructor(D, Dtor_Base);
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+llvm::Function *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
+
+ const char *Name = getMangledCXXDtorName(D, Type);
+ return cast<llvm::Function>(
+ GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
+}
+
+const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ mangleCXXDtor(D, Type, Context, Out);
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
new file mode 100644
index 0000000..6051d91
--- /dev/null
+++ b/lib/CodeGen/CGCXX.h
@@ -0,0 +1,36 @@
+//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These enums describe the kinds of C++ constructors and destructors
+// (complete object, base object, etc.) used during code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCXX_H
+#define CLANG_CODEGEN_CGCXX_H
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
+
+} // end namespace clang
+
+#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..ea0b887
--- /dev/null
+++ b/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,2196 @@
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetData.h"
+
+#include "ABIInfo.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+// FIXME: Use iterator and sidestep silly type array creation.
+
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
+ return getFunctionInfo(FTNP->getResultType(),
+ llvm::SmallVector<QualType, 16>());
+}
+
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ return getFunctionInfo(FTP->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ // Add the 'this' pointer unless this is a static method.
+ if (MD->isInstance())
+ ArgTys.push_back(MD->getThisType(Context));
+
+ const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+ return getFunctionInfo(FTP->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return getFunctionInfo(MD);
+
+ const FunctionType *FTy = FD->getType()->getAsFunctionType();
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
+ return getFunctionInfo(FTP);
+ return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(MD->getSelfDecl()->getType());
+ ArgTys.push_back(Context.getObjCSelType());
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i)
+ ArgTys.push_back((*i)->getType());
+ return getFunctionInfo(MD->getResultType(), ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const CallArgList &Args) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<QualType, 16> ArgTys;
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(i->second);
+ return getFunctionInfo(ResTy, ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args) {
+ // FIXME: Kill copy.
+ llvm::SmallVector<QualType, 16> ArgTys;
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i)
+ ArgTys.push_back(i->second);
+ return getFunctionInfo(ResTy, ArgTys);
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys) {
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
+
+ void *InsertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info.
+ FI = new CGFunctionInfo(ResTy, ArgTys);
+ FunctionInfos.InsertNode(FI, InsertPos);
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI, getContext());
+
+ return *FI;
+}
+
+/***/
+
+ABIInfo::~ABIInfo() {}
+
+void ABIArgInfo::dump() const {
+ fprintf(stderr, "(ABIArgInfo Kind=");
+ switch (TheKind) {
+ case Direct:
+ fprintf(stderr, "Direct");
+ break;
+ case Ignore:
+ fprintf(stderr, "Ignore");
+ break;
+ case Coerce:
+ fprintf(stderr, "Coerce Type=");
+ getCoerceToType()->print(llvm::errs());
+ break;
+ case Indirect:
+ fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
+ break;
+ case Expand:
+ fprintf(stderr, "Expand");
+ break;
+ }
+ fprintf(stderr, ")\n");
+}
+
+/***/
+
+static bool isEmptyRecord(ASTContext &Context, QualType T);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it is an
+/// unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+ // Constant arrays of empty records count as empty, strip them off.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+ FT = AT->getElementType();
+
+ return isEmptyRecord(Context, FT);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T) {
+ const RecordType *RT = T->getAsRecordType();
+ if (!RT)
+    return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i)
+ if (!isEmptyField(Context, *i))
+ return false;
+ return true;
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if it exists.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ return Found;
+}
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
+ ASTContext &Context) {
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+    // FIXME: Reject bit-fields wholesale; there are two problems: we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ ASTContext &Context;
+ bool IsDarwin;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+public:
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(ASTContext &Context, bool d)
+ : ABIInfo(), Context(Context), IsDarwin(d) {}
+};
+}
+
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// passed in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128- bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, or complex type, it is ok.
+ if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAsRecordType();
+ if (!RT) return false;
+
+  // Structure types are passed in a register if all fields would be
+ // passed in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
+ e = RT->getDecl()->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+
+ return true;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (const VectorType *VT = RetTy->getAsVectorType()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwin) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
+ 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = RetTy->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Outside of Darwin, structs and unions are always indirect.
+ if (!IsDarwin && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Classify "single element" structs as their element type.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+ if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
+ if (BT->isIntegerType()) {
+ // We need to use the size of the structure, padding
+ // bit-fields can adjust that to be larger than the single
+ // element type.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
+ } else if (BT->getKind() == BuiltinType::Float) {
+          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+                 "Unexpected single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
+ } else if (BT->getKind() == BuiltinType::Double) {
+          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+                 "Unexpected single element structure size!");
+ return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
+ }
+ } else if (SeltTy->isPointerType()) {
+ // FIXME: It would be really nice if this could come out as the proper
+ // pointer type.
+ llvm::Type *PtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ return ABIArgInfo::getCoerce(PtrTy);
+ } else if (SeltTy->isVectorType()) {
+ // 64- and 128-bit vectors are never returned in a
+ // register when inside a structure.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size == 64 || Size == 128)
+ return ABIArgInfo::getIndirect(0);
+
+ return classifyReturnType(QualType(SeltTy, 0), Context);
+ }
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+ uint64_t Size = Context.getTypeSize(RetTy);
+ return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
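
For illustration, a few hypothetical aggregates and how the Darwin i386 path above treats them: register-sized aggregates are coerced to an integer of the same width and returned in registers, everything else is returned indirectly.

  struct Pair16 { short lo, hi; };   // 32 bits -> coerced to i32, returned in %eax
  struct WordPair { int a, b; };     // 64 bits -> coerced to i64, split across %eax/%edx
  struct Odd { char bytes[12]; };    // not register sized -> indirect (sret) return

  Pair16   makePair()  { Pair16 p = { 1, 2 }; return p; }
  WordPair makeWords() { WordPair w = { 3, 4 }; return w; }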
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAsStructureType())
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+
+ // Ignore empty structs.
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (Ty->isStructureType() && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Expand structs with size <= 128-bits which consist only of
+ // basic types (int, long long, float, double, xxx*). This is
+ // non-recursive and does not ignore empty fields.
+ if (const RecordType *RT = Ty->getAsStructureType()) {
+ if (Context.getTypeSize(Ty) <= 4*32 &&
+ areAllFields32Or64BitBasicType(RT->getDecl(), Context))
+ return ABIArgInfo::getExpand();
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ Class merge(Class Accum, Class Field) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+  /// containing object. Some parameters are classified differently
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const;
+
+ /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+ /// to coerce to, chose the best way to pass Ty in the same place
+ /// that \arg CoerceTo would be passed, but while keeping the
+ /// emitted code as simple as possible.
+ ///
+ /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+ /// the ways we might want to pass things, instead of constructing an LLVM
+ /// type. This makes this code more explicit, and it makes it clearer that we
+ /// are also doing this for correctness in the case of passing scalar types.
+ ABIArgInfo getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const;
+
+  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+public:
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+ Class Field) const {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ else if (Field == Memory)
+ return Memory;
+ else if (Accum == NoClass)
+ return Field;
+ else if (Accum == Integer || Field == Integer)
+ return Integer;
+ else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ else
+ return SSE;
+}
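
To make the merge rules concrete, a couple of hypothetical aggregates and the classes the algorithm above assigns them, assuming the SysV x86-64 ABI this code targets:

  // One eightbyte holding an int and a float: INTEGER merged with SSE gives
  // INTEGER (rule d), so the whole struct travels in a single GPR.
  struct IntFloat { int i; float f; };

  // Two eightbytes: the double is SSE, the long is INTEGER, so the struct is
  // passed in one XMM register plus one general purpose register.
  struct DoubleLong { double d; long l; };

  // Larger than two eightbytes: classified MEMORY and passed on the stack.
  struct Big { long a, b, c; };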
+
+void X86_64ABIInfo::classify(QualType Ty,
+ ASTContext &Context,
+ uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ } else if (const EnumType *ET = Ty->getAsEnumType()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+ } else if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ } else if (const VectorType *VT = Ty->getAsVectorType()) {
+ uint64_t Size = Context.getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ } else if (const ComplexType *CT = Ty->getAsComplexType()) {
+ QualType ET = Context.getCanonicalType(CT->getElementType());
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ if (ET->isIntegralType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == Context.FloatTy)
+ Current = SSE;
+ else if (ET == Context.DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == Context.LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // Do post merger cleanup (see below). Only case we worry about is Memory.
+ if (Hi == Memory)
+ Lo = Memory;
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ } else if (const RecordType *RT = Ty->getAsRecordType()) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than two eightbytes, ..., it has class MEMORY.
+ if (Size > 128)
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is MEMORY, the whole argument is
+ // passed in memory.
+ //
+    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+ // The first of these conditions is guaranteed by how we implement
+ // the merge (just bail).
+ //
+ // The second condition occurs in the case of unions; for example
+ // union { _Complex double; unsigned; }.
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+ }
+}
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+ const llvm::Type *CoerceTo,
+ ASTContext &Context) const {
+ if (CoerceTo == llvm::Type::Int64Ty) {
+ // Integer and pointer types will end up in a general purpose
+ // register.
+ if (Ty->isIntegralType() || Ty->isPointerType())
+ return ABIArgInfo::getDirect();
+
+ } else if (CoerceTo == llvm::Type::DoubleTy) {
+ // FIXME: It would probably be better to make CGFunctionInfo only map using
+ // canonical types than to canonize here.
+ QualType CTy = Context.getCanonicalType(Ty);
+
+ // Float and double end up in a single SSE reg.
+ if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+ return ABIArgInfo::getDirect();
+
+ }
+
+ return ABIArgInfo::getCoerce(CoerceTo);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ ASTContext &Context) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty))
+ return ABIArgInfo::getDirect();
+
+ // FIXME: Set alignment correctly.
+ return ABIArgInfo::getIndirect(0);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectResult(RetTy, Context);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = llvm::Type::Int64Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = llvm::Type::DoubleTy; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::X86_FP80Ty; break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
+ llvm::Type::X86_FP80Ty,
+ NULL);
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ assert(0 && "Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass: break;
+
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ break;
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the upper half of the last used SSE register.
+ //
+  // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+    // If X87Up is preceded by X87, we don't need to do
+    // anything. However, in some cases with unions it may not be
+    // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87)
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ break;
+ }
+
+ return getCoerceResult(RetTy, ResType, Context);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+ unsigned &neededInt,
+ unsigned &neededSSE) const {
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, Context, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ const llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ return ABIArgInfo::getIgnore();
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ return getIndirectResult(Ty, Context);
+
+ case SSEUp:
+ case X87Up:
+ assert(0 && "Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+ ResType = llvm::Type::Int64Ty;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE:
+ ++neededSSE;
+ ResType = llvm::Type::DoubleTy;
+ break;
+ }
+
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+  // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ assert(0 && "Invalid classification for hi word.");
+ break;
+
+ case NoClass: break;
+ case Integer:
+ ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
+ ++neededInt;
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
+ break;
+ }
+
+ return getCoerceResult(Ty, ResType, Context);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, Context);
+ }
+ }
+}
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // Note that we follow the ABI & gcc here, even though the type
+ // could in theory have an alignment greater than 16. This case
+ // shouldn't ever matter in practice.
+
+ // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ llvm::Type::Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+ ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
+ neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+  // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs =
+ CGF.Builder.CreateICmpULE(gp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 48 - neededInt * 8),
+ "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ CGF.Builder.CreateICmpULE(fp_offset,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 176 - neededSSE * 16),
+ "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ const llvm::Type *TyLo = ST->getElementType(0);
+ const llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
+ "Unexpected ABI info for mixed regs");
+ const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+      // area; we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi =
+ CGF.Builder.CreateGEP(RegAddrLo,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
+ const llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
+ const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
+ llvm::Type::DoubleTy,
+ NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+ "vaarg.addr");
+ ResAddr->reserveOperandSpace(2);
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+
+ return ResAddr;
+}
+
+// ABI Info for PIC16
+class PIC16ABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Context);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+};
+
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ return ABIArgInfo::getDirect();
+}
+
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
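+  // No va_arg lowering is provided for PIC16; this returns a null Value.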
+ return 0;
+}
+
+class ARMABIInfo : public ABIInfo {
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ ASTContext &Context) const;
+
+ ABIArgInfo classifyArgumentType(QualType RetTy,
+ ASTContext &Context) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ it->info = classifyArgumentType(it->type, Context);
+ }
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getDirect();
+ }
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
+ const llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (Context.getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::Int64Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::Int32Ty;
+ SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+ }
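+  // Coerce the aggregate into a packed struct wrapping an i32/i64 array big
+  // enough to cover it, so it is passed by value without relying on byval.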
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+ const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
+ return ABIArgInfo::getCoerce(STy);
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = Context.getTypeSize(RetTy);
+ if (Size <= 32)
+ return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+ ASTContext &Context) const {
+ if (RetTy->isVoidType()) {
+ return ABIArgInfo::getIgnore();
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+ ASTContext &Context) const {
+ if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ return ABIArgInfo::getIndirect(0);
+ } else {
+ return ABIArgInfo::getDirect();
+ }
+}
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+const ABIInfo &CodeGenTypes::getABIInfo() const {
+ if (TheABIInfo)
+ return *TheABIInfo;
+
+ // For now we just cache this in the CodeGenTypes and don't bother
+ // to free it.
+ const char *TargetPrefix = getContext().Target.getTargetPrefix();
+ if (strcmp(TargetPrefix, "x86") == 0) {
+ bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
+ switch (getContext().Target.getPointerWidth(0)) {
+ case 32:
+ return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
+ case 64:
+ return *(TheABIInfo = new X86_64ABIInfo());
+ }
+ } else if (strcmp(TargetPrefix, "arm") == 0) {
+ // FIXME: Support for OABI?
+ return *(TheABIInfo = new ARMABIInfo());
+ } else if (strcmp(TargetPrefix, "pic16") == 0) {
+ return *(TheABIInfo = new PIC16ABIInfo());
+ }
+
+ return *(TheABIInfo = new DefaultABIInfo);
+}
+
+/***/
+
+CGFunctionInfo::CGFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys) {
+ NumArgs = ArgTys.size();
+ Args = new ArgInfo[1 + NumArgs];
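+  // Slot 0 describes the return value; slots 1..NumArgs describe the formal
+  // arguments. The ABIArgInfo half of each slot is filled in later by the
+  // target's computeInfo().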
+ Args[0].type = ResTy;
+ for (unsigned i = 0; i < NumArgs; ++i)
+ Args[1 + i].type = ArgTys[i];
+}
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
+ std::vector<const llvm::Type*> &ArgTys) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+
+ QualType FT = FD->getType();
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ GetExpandedTypes(FT, ArgTys);
+ } else {
+ ArgTys.push_back(ConvertType(FT));
+ }
+ }
+}
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+ llvm::Value *Addr = LV.getAddress();
+ for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
+ e = RD->field_end(getContext()); i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, false, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ AI = ExpandTypeFromArgs(FT, LV, AI);
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+ ++AI;
+ }
+ }
+
+ return AI;
+}
+
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ llvm::SmallVector<llvm::Value*, 16> &Args) {
+ const RecordType *RT = Ty->getAsStructureType();
+ assert(RT && "Can only expand structure types.");
+
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
+ e = RD->field_end(getContext()); i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, false, 0);
+ if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+ ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
+ } else {
+ RValue RV = EmitLoadOfLValue(LV, FT);
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+ Args.push_back(RV.getScalarVal());
+ }
+ }
+}
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ const llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+  // FIXME: Assert that we aren't truncating non-padding bits when we have
+  // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+ }
+}
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ CodeGenFunction &CGF) {
+ const llvm::Type *SrcTy = Src->getType();
+ const llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+  // FIXME: Assert that we aren't truncating non-padding bits when we have
+  // access to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr);
+ }
+}
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+const llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+ std::vector<const llvm::Type*> ArgTys;
+
+ const llvm::Type *ResultType = 0;
+
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Direct:
+ ResultType = ConvertType(RetTy);
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
+ ResultType = llvm::Type::VoidTy;
+ const llvm::Type *STy = ConvertType(RetTy);
+ ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ ResultType = llvm::Type::VoidTy;
+ break;
+
+ case ABIArgInfo::Coerce:
+ ResultType = RetAI.getCoerceToType();
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &AI = it->info;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ ArgTys.push_back(AI.getCoerceToType());
+ break;
+
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Direct:
+ ArgTys.push_back(ConvertType(it->type));
+ break;
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, ArgTys);
+ break;
+ }
+ }
+
+ return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL) {
+ unsigned FuncAttrs = 0;
+ unsigned RetAttrs = 0;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+ if (TargetDecl->hasAttr<ConstAttr>())
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ else if (TargetDecl->hasAttr<PureAttr>())
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ }
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
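+  // In the LLVM attribute list, index 0 is the return value, ~0 is the
+  // function itself, and parameters are numbered from 1; hence Index starts
+  // at 1.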
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Direct:
+ if (RetTy->isPromotableIntegerType()) {
+ if (RetTy->isSignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::SExt;
+ } else if (RetTy->isUnsignedIntegerType()) {
+ RetAttrs |= llvm::Attribute::ZExt;
+ }
+ }
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet |
+ llvm::Attribute::NoAlias));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: we need to honour command line settings also...
+ // FIXME: RegParm should be reduced in case of nested functions and/or global
+ // register variable.
+ signed RegParm = 0;
+ if (TargetDecl)
+ if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
+ RegParm = RegParmAttr->getNumParams();
+
+ unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ unsigned Attributes = 0;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Coerce:
+ break;
+
+ case ABIArgInfo::Indirect:
+ Attributes |= llvm::Attribute::ByVal;
+ Attributes |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Direct:
+ if (ParamType->isPromotableIntegerType()) {
+ if (ParamType->isSignedIntegerType()) {
+ Attributes |= llvm::Attribute::SExt;
+ } else if (ParamType->isUnsignedIntegerType()) {
+ Attributes |= llvm::Attribute::ZExt;
+ }
+ }
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attributes |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ std::vector<const llvm::Type*> Tys;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should be just reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, Tys);
+ Index += Tys.size();
+ continue;
+ }
+ }
+
+ if (Attributes)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
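+  // AI walks the actual LLVM arguments; the cases below must consume them in
+  // exactly the order GetFunctionType laid them out.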
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSret(FI)) {
+ AI->setName("agg.result");
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it) {
+ const VarDecl *Arg = i->first;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+        // Do nothing; aggregates and complex variables are accessed by
+ // reference.
+ } else {
+ // Load scalar value from indirect argument.
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Direct: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value* V = AI;
+ if (hasAggregateLLVMType(Ty)) {
+ // Create a temporary alloca to hold the argument; the rest of
+ // codegen expects to access aggregates & complex values by
+ // reference.
+ V = CreateTempAlloca(ConvertTypeForMem(Ty));
+ Builder.CreateStore(AI, V);
+ } else {
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ std::string Name = Arg->getNameAsString();
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
+ (Name + ".addr").c_str());
+ // FIXME: What are the right qualifiers here?
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Name + "." + llvm::utostr(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty)) {
+ EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
+ } else {
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+ }
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Coerce: {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
+ // result in a new alloca anyway, so we could just store into that
+ // directly if we broke the abstraction down more.
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
+ CreateCoercedStore(AI, V, *this);
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, Ty);
+ if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+ // This must be a promotion, for something like
+ // "void a(x) short x; {..."
+ V = EmitScalarConversion(V, Ty, Arg->getType());
+ }
+ }
+ EmitParmDecl(*Arg, V);
+ break;
+ }
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+ llvm::Value *ReturnValue) {
+ llvm::Value *RV = 0;
+
+ // Functions with no result always return void.
+ if (ReturnValue) {
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
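+      // The sret pointer is the first LLVM argument (see EmitFunctionProlog);
+      // store the return value through it and leave RV null so that a void
+      // return is emitted below.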
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, RetTy);
+ }
+ break;
+
+ case ABIArgInfo::Direct:
+ // The internal return value temp always will have
+ // pointer-to-return-type type.
+ RV = Builder.CreateLoad(ReturnValue);
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce:
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ break;
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+ }
+
+ if (RV) {
+ Builder.CreateRet(RV);
+ } else {
+ Builder.CreateRetVoid();
+ }
+}
+
+RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
+ if (ArgType->isReferenceType())
+ return EmitReferenceBindingToExpr(E, ArgType);
+
+ return EmitAnyExprToTemp(E);
+}
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ llvm::SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+ if (CGM.ReturnTypeUsesSret(CallInfo)) {
+ // Create a temporary alloca to hold the result of the call. :(
+ Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->first;
+
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ } else {
+ Args.push_back(RV.getAggregateAddr());
+ }
+ break;
+
+ case ABIArgInfo::Direct:
+ if (RV.isScalar()) {
+ Args.push_back(RV.getScalarVal());
+ } else if (RV.isComplex()) {
+ llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
+ Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
+ Args.push_back(Tmp);
+ } else {
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ }
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Coerce: {
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->second, RV, Args);
+ break;
+ }
+ }
+
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
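+  // Use an invoke only when there is an enclosing EH scope (an invoke
+  // destination) and the callee may actually unwind; otherwise a plain call
+  // suffices.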
+
+ llvm::CallSite CS;
+ if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ Args.data(), Args.data()+Args.size());
+ EmitBlock(Cont);
+ }
+
+ CS.setAttributes(Attrs);
+ if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
+ CS.setCallingConv(F->getCallingConv());
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+ // generally are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
+ CI->setName("call");
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect:
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
+
+ case ABIArgInfo::Direct:
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
+ Builder.CreateStore(CI, V);
+ return RValue::getAggregate(V);
+ }
+ return RValue::get(CI);
+
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Coerce: {
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
+ CreateCoercedStore(CI, V, *this);
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(V, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(V);
+ return RValue::get(EmitLoadOfScalar(V, false, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ assert(0 && "Invalid ABI kind for return argument");
+ }
+
+ assert(0 && "Unhandled ABIArgInfo::Kind");
+ return RValue::get(0);
+}
+
+/* VarArg handling */
+
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..daf6f00
--- /dev/null
+++ b/lib/CodeGen/CGCall.h
@@ -0,0 +1,104 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include <llvm/ADT/FoldingSet.h>
+#include "clang/AST/Type.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ 16> FunctionArgList;
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ QualType type;
+ ABIArgInfo info;
+ };
+
+ unsigned NumArgs;
+ ArgInfo *Args;
+
+ public:
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ CGFunctionInfo(QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys);
+ ~CGFunctionInfo() { delete[] Args; }
+
+ const_arg_iterator arg_begin() const { return Args + 1; }
+ const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
+ arg_iterator arg_begin() { return Args + 1; }
+ arg_iterator arg_end() { return Args + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ QualType getReturnType() const { return Args[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return Args[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ template<class Iterator>
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType ResTy,
+ Iterator begin,
+ Iterator end) {
+ ResTy.Profile(ID);
+ for (; begin != end; ++begin)
+ begin->Profile(ID);
+ }
+ };
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..049e716
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,987 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/System/Path.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule *m)
+ : M(m), isMainCompileUnitCreated(false), DebugFactory(M->getModule()),
+ BlockLiteralGenericSet(false) {
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ if (Loc.isValid())
+ CurLoc = M->getContext().getSourceManager().getInstantiationLoc(Loc);
+}
+
+/// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+/// new one if necessary. Invalid source locations are mapped to a single
+/// "<unknown>" compile unit.
+llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
+ // Get source file information.
+ const char *FileName = "<unknown>";
+ SourceManager &SM = M->getContext().getSourceManager();
+ unsigned FID = 0;
+ if (Loc.isValid()) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ FileName = PLoc.getFilename();
+ FID = PLoc.getIncludeLoc().getRawEncoding();
+ }
+
+ // See if this compile unit has been used before.
+ llvm::DICompileUnit &Unit = CompileUnitCache[FID];
+ if (!Unit.isNull()) return Unit;
+
+ // Get absolute path name.
+ llvm::sys::Path AbsFileName(FileName);
+ if (!AbsFileName.isAbsolute()) {
+ llvm::sys::Path tmp = llvm::sys::Path::GetCurrentDirectory();
+ tmp.appendComponent(FileName);
+ AbsFileName = tmp;
+ }
+
+  // See if this compile unit is representing the main source file. Each
+  // source file has a corresponding compile unit. There is only one main
+  // source file at a time.
+ bool isMain = false;
+ const LangOptions &LO = M->getLangOptions();
+ const char *MainFileName = LO.getMainFileName();
+ if (isMainCompileUnitCreated == false) {
+ if (MainFileName) {
+ if (!strcmp(AbsFileName.getLast().c_str(), MainFileName))
+ isMain = true;
+ } else {
+ if (Loc.isValid() && SM.isFromMainFile(Loc))
+ isMain = true;
+ }
+ if (isMain)
+ isMainCompileUnitCreated = true;
+ }
+
+ unsigned LangTag;
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ std::string Producer = "clang 1.0";// FIXME: clang version.
+ bool isOptimized = LO.Optimize;
+ const char *Flags = ""; // FIXME: Encode command line options.
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ return Unit = DebugFactory.CreateCompileUnit(LangTag, AbsFileName.getLast(),
+ AbsFileName.getDirname(),
+ Producer, isMain, isOptimized,
+ Flags, RuntimeVers);
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
+ llvm::DICompileUnit Unit) {
+ unsigned Encoding = 0;
+ switch (BT->getKind()) {
+ default:
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Float:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(BT);
+ uint64_t Align = M->getContext().getTypeAlign(BT);
+ uint64_t Offset = 0;
+
+ return DebugFactory.CreateBasicType(Unit,
+ BT->getName(M->getContext().getLangOptions().CPlusPlus),
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
+ llvm::DICompileUnit Unit) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+ uint64_t Offset = 0;
+
+ return DebugFactory.CreateBasicType(Unit, "complex",
+ Unit, 0, Size, Align,
+ Offset, /*flags*/ 0, Encoding);
+}
+
+/// CreateCVRType - Get the CVR-qualified type from the cache or create
+/// a new one if necessary.
+llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
+ // We will create one Derived type for one qualifier and recurse to handle any
+ // additional ones.
+ llvm::DIType FromTy;
+ unsigned Tag;
+ if (Ty.isConstQualified()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Ty.removeConst();
+ FromTy = getOrCreateType(Ty, Unit);
+ } else if (Ty.isVolatileQualified()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Ty.removeVolatile();
+ FromTy = getOrCreateType(Ty, Unit);
+ } else {
+ assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Ty.removeRestrict();
+ FromTy = getOrCreateType(Ty, Unit);
+ }
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
+ 0, 0, 0, 0, 0, FromTy);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DICompileUnit Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
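+  // Build the debug type for the generic block literal layout, roughly:
+  //   struct __block_descriptor { unsigned long reserved; unsigned long Size; };
+  //   struct __block_literal_generic { void *__isa; int __flags; int __reserved;
+  //     void *__FuncPtr; struct __block_descriptor *__descriptor; };
+  // The resulting pointer type is cached and shared by every block pointer.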
+ llvm::DICompileUnit DefUnit;
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
+ FieldOffset = 0;
+ FType = M->getContext().UnsignedLongTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "reserved", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().UnsignedLongTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "Size", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+ EltTys.clear();
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
+ DefUnit, 0, FieldOffset, 0, 0, 0,
+ llvm::DIType(), Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+
+ FieldOffset = 0;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__isa", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().IntTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__flags", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().IntTy;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__reserved", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__FuncPtr", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = M->getContext().getTypeSize(Ty);
+ FieldAlign = M->getContext().getTypeAlign(Ty);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__descriptor", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
+ DefUnit, 0, FieldOffset, 0, 0, 0,
+ llvm::DIType(), Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric
+ = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+ return BlockLiteralGeneric;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
+ llvm::DICompileUnit Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ std::string TyName = Ty->getDecl()->getNameAsString();
+ SourceLocation DefLoc = Ty->getDecl()->getLocation();
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);
+
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef, Unit,
+ TyName, DefUnit, Line, 0, 0, 0, 0, Src);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ } else {
+ // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
+ }
+
+ llvm::DIArray EltTypeArray =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, 0, 0, 0, 0,
+ llvm::DIType(), EltTypeArray);
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+ llvm::DICompileUnit Unit) {
+ RecordDecl *Decl = Ty->getDecl();
+
+ unsigned Tag;
+ if (Decl->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (Decl->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else {
+ assert(Decl->isClass() && "Unknown RecordType!");
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+
+ SourceManager &SM = M->getContext().getSourceManager();
+
+ // Get overall information about the record type for the debug info.
+ std::string Name = Decl->getNameAsString();
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ llvm::DICompileUnit DefUnit;
+ unsigned Line = 0;
+ if (!PLoc.isInvalid()) {
+ DefUnit = getOrCreateCompileUnit(Decl->getLocation());
+ Line = PLoc.getLine();
+ }
+
+ // Records and classes and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DIType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray());
+
+ // If this is just a forward declaration, return it.
+ if (!Decl->getDefinition(M->getContext()))
+ return FwdDecl;
+
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ const ASTRecordLayout &RL = M->getContext().getASTRecordLayout(Decl);
+
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = Decl->field_begin(M->getContext()),
+ E = Decl->field_end(M->getContext());
+ I != E; ++I, ++FieldNo) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ std::string FieldName = Field->getNameAsString();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ SourceLocation FieldDefLoc = Field->getLocation();
+ PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+ llvm::DICompileUnit FieldDefUnit;
+ unsigned FieldLine = 0;
+
+ if (!PLoc.isInvalid()) {
+ FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+ FieldLine = PLoc.getLine();
+ }
+
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = M->getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
+
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ llvm::DIType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
+ Align, 0, 0, llvm::DIType(), Elements);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
+ FwdDecl.getGV()->eraseFromParent();
+
+ return RealDecl;
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DICompileUnit Unit) {
+ ObjCInterfaceDecl *Decl = Ty->getDecl();
+
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+ SourceManager &SM = M->getContext().getSourceManager();
+
+ // Get overall information about the record type for the debug info.
+ std::string Name = Decl->getNameAsString();
+
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(Decl->getLocation());
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+ unsigned RuntimeLang = DefUnit.getLanguage();
+
+  // To handle a recursive interface, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+ llvm::DIType FwdDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
+ llvm::DIType(), llvm::DIArray(),
+ RuntimeLang);
+
+ // If this is just a forward declaration, return it.
+ if (Decl->isForwardDecl())
+ return FwdDecl;
+
+ // Otherwise, insert it into the TypeCache so that recursive uses will find
+ // it.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = Decl->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(M->getContext().getObjCInterfaceType(SClass), Unit);
+ llvm::DIType InhTag =
+ DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+ Unit, "", llvm::DICompileUnit(), 0, 0, 0,
+ 0 /* offset */, 0, SClassTy);
+ EltTys.push_back(InhTag);
+ }
+
+ const ASTRecordLayout &RL = M->getContext().getASTObjCInterfaceLayout(Decl);
+
+ unsigned FieldNo = 0;
+ for (ObjCInterfaceDecl::ivar_iterator I = Decl->ivar_begin(),
+ E = Decl->ivar_end(); I != E; ++I, ++FieldNo) {
+ ObjCIvarDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+ std::string FieldName = Field->getNameAsString();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ SourceLocation FieldDefLoc = Field->getLocation();
+ llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+ PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+ unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = M->getContext().getTypeSize(FType);
+ Expr *BitWidth = Field->getBitWidth();
+ if (BitWidth)
+ FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
+
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ }
+
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIType::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIType::FlagPrivate;
+
+ // Create a DW_TAG_member node to remember the offset of this field in the
+ // struct. FIXME: This is an absolutely insane way to capture this
+ // information. When we gut debug info, this should be fixed.
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements =
+ DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ llvm::DIType RealDecl =
+ DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
+ Align, 0, 0, llvm::DIType(), Elements,
+ RuntimeLang);
+
+ // Now that we have a real decl for the struct, replace anything using the
+ // old decl with the new one. This will recursively update the debug info.
+ FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
+ FwdDecl.getGV()->eraseFromParent();
+
+ return RealDecl;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
+ llvm::DICompileUnit Unit) {
+ EnumDecl *Decl = Ty->getDecl();
+
+ llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = Decl->enumerator_begin(M->getContext()),
+ EnumEnd = Decl->enumerator_end(M->getContext());
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsString(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray =
+ DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+ std::string EnumName = Decl->getNameAsString();
+ SourceLocation DefLoc = Decl->getLocation();
+ llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
+ unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+ // Size and align of the type.
+ uint64_t Size = 0;
+ unsigned Align = 0;
+ if (!Ty->isIncompleteType()) {
+ Size = M->getContext().getTypeSize(Ty);
+ Align = M->getContext().getTypeAlign(Ty);
+ }
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+ Unit, EnumName, DefUnit, Line,
+ Size, Align, 0, 0,
+ llvm::DIType(), EltArray);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
+ llvm::DICompileUnit Unit) {
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return CreateType(RT, Unit);
+ else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+ return CreateType(ET, Unit);
+
+ return llvm::DIType();
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DICompileUnit Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ M->getContext().getTypeAlign(M->getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = M->getContext().getTypeAlign(Ty->getElementType());
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = M->getContext().getTypeSize(Ty);
+ Align = M->getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays, do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
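+ // For example, 'int a[2][3]' makes the loop below record the subranges
+ // [0,1] and [0,2] and leave EltTy as 'int'.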
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ uint64_t Upper = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ Upper = CAT->getSize().getZExtValue() - 1;
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+ EltTy = Ty->getElementType();
+ }
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+ return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0,
+ getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+}
+
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
+ llvm::DICompileUnit Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Check to see if the compile unit already has created this type.
+ llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()];
+ if (!Slot.isNull()) return Slot;
+
+ // Handle CVR qualifiers, which recursively handles what they refer to.
+ if (Ty.getCVRQualifiers())
+ return Slot = CreateCVRType(Ty, Unit);
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Dependent types cannot show up in debug information");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ExtQual:
+ case Type::FixedWidthInt:
+ case Type::MemberPointer:
+ case Type::TemplateSpecialization:
+ case Type::QualifiedName:
+ // Unsupported types
+ return llvm::DIType();
+ case Type::ObjCQualifiedId: // Encode id<p> in debug info just like id.
+ return Slot = getOrCreateType(M->getContext().getObjCIdType(), Unit);
+
+ case Type::ObjCQualifiedInterface: // Drop protocols from interface.
+ case Type::ObjCInterface:
+ return Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin: return Slot = CreateType(cast<BuiltinType>(Ty), Unit);
+ case Type::Complex: return Slot = CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Pointer: return Slot = CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return Slot = CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef: return Slot = CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ case Type::Enum:
+ return Slot = CreateType(cast<TagType>(Ty), Unit);
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return Slot = CreateType(cast<FunctionType>(Ty), Unit);
+
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return Slot = CreateType(cast<ArrayType>(Ty), Unit);
+ case Type::TypeOfExpr:
+ return Slot = getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr()
+ ->getType(), Unit);
+ case Type::TypeOf:
+ return Slot = getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(),
+ Unit);
+ }
+
+ return Slot;
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start.".
+void CGDebugInfo::EmitFunctionStart(const char *Name, QualType ReturnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+ const char *LinkageName = Name;
+
+ // Skip the asm prefix if it exists.
+ //
+ // FIXME: This should probably be the unmangled name?
+ if (Name[0] == '\01')
+ ++Name;
+
+ // FIXME: Why is this using CurLoc???
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+ SourceManager &SM = M->getContext().getSourceManager();
+ unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
+
+ llvm::DISubprogram SP =
+ DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
+ getOrCreateType(ReturnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/);
+
+ DebugFactory.InsertSubprogramStart(SP, Builder.GetInsertBlock());
+
+ // Push function on region stack.
+ RegionStack.push_back(SP);
+}
+
+
+void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = M->getContext().getSourceManager();
+ if (CurLoc == PrevLoc
+ || (SM.getInstantiationLineNumber(CurLoc) ==
+ SM.getInstantiationLineNumber(PrevLoc)
+ && SM.isFromSameFile(CurLoc, PrevLoc)))
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ // Get the appropriate compile unit.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+ PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
+ DebugFactory.InsertStopPoint(Unit, PLoc.getLine(), PLoc.getColumn(),
+ Builder.GetInsertBlock());
+}
+
+/// EmitRegionStart - Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start.".
+void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+ llvm::DIDescriptor D;
+ if (!RegionStack.empty())
+ D = RegionStack.back();
+ D = DebugFactory.CreateBlock(D);
+ RegionStack.push_back(D);
+ DebugFactory.InsertRegionStart(D, Builder.GetInsertBlock());
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Provide a region stop point.
+ EmitStopPoint(Fn, Builder);
+
+ DebugFactory.InsertRegionEnd(RegionStack.back(), Builder.GetInsertBlock());
+ RegionStack.pop_back();
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder) {
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Do not emit variable debug information while generating optimized code.
+ // The llvm optimizer and code generator are not yet ready to support
+ // optimized code debugging.
+ const CompileOptions &CO = M->getCompileOpts();
+ if (CO.OptimizationLevel)
+ return;
+
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ llvm::DIType Ty = getOrCreateType(Decl->getType(), Unit);
+
+ // Get location information.
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned Line = 0;
+ if (!PLoc.isInvalid())
+ Line = PLoc.getLine();
+ else
+ Unit = llvm::DICompileUnit();
+
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateVariable(Tag, RegionStack.back(),Decl->getNameAsString(),
+ Unit, Line, Ty);
+ // Insert an llvm.dbg.declare into the current block.
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *Decl,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(Decl, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder) {
+ EmitDeclare(Decl, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+}
+
+
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *Decl) {
+
+ // Do not emit variable debug information while generating optimized code.
+ // The llvm optimizer and code generator are not yet ready to support
+ // optimized code debugging.
+ const CompileOptions &CO = M->getCompileOpts();
+ if (CO.OptimizationLevel)
+ return;
+
+ // Create global variable debug descriptor.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ std::string Name = Decl->getNameAsString();
+
+ QualType T = Decl->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = M->getContext().getAsArrayType(T)->getElementType();
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit information about an objective-c interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *Decl) {
+ // Create global variable debug descriptor.
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+ std::string Name = Decl->getNameAsString();
+
+ QualType T = M->getContext().getObjCInterfaceType(Decl);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = M->getContext().getAsArrayType(T)->getElementType();
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(),
+ true/*definition*/, Var);
+}
+
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..de65580
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,126 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source level debug info generator for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include <map>
+
+#include "CGBuilder.h"
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly
+/// to the backend.
+class CGDebugInfo {
+ CodeGenModule *M;
+ bool isMainCompileUnitCreated;
+ llvm::DIFactory DebugFactory;
+
+ SourceLocation CurLoc, PrevLoc;
+
+ /// CompileUnitCache - Cache of previously constructed CompileUnits.
+ llvm::DenseMap<unsigned, llvm::DICompileUnit> CompileUnitCache;
+
+ /// TypeCache - Cache of previously constructed Types.
+ // FIXME: Eliminate this map. Be careful of iterator invalidation.
+ std::map<void *, llvm::DIType> TypeCache;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+ std::vector<llvm::DIDescriptor> RegionStack;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateCVRType(QualType Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const TagType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const RecordType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const EnumType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DICompileUnit U);
+
+public:
+ CGDebugInfo(CodeGenModule *m);
+ ~CGDebugInfo();
+
+ /// setLocation - Update the current source location. If \arg loc is
+ /// invalid it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+ /// source line.
+ void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
+ /// start of a new function.
+ void EmitFunctionStart(const char *Name, QualType ReturnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
+ /// of a new block.
+ void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+ /// block.
+ void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+ /// EmitGlobalVariable - Emit information about an objective-c interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+
+ /// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+ /// new one if necessary.
+ llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc);
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DICompileUnit Unit);
+};
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..bcad77b
--- /dev/null
+++ b/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,489 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ default: assert(0 && "Unknown decl kind!");
+ case Decl::ParmVar:
+ assert(0 && "Parmdecls should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isBlockVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitBlockVarDecl(VD);
+ }
+
+ case Decl::Typedef: { // typedef int X;
+ const TypedefDecl &TD = cast<TypedefDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+ }
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
+ if (D.hasAttr<AsmLabelAttr>())
+ CGM.ErrorUnsupported(&D, "__asm__");
+
+ switch (D.getStorageClass()) {
+ case VarDecl::None:
+ case VarDecl::Auto:
+ case VarDecl::Register:
+ return EmitLocalBlockVarDecl(D);
+ case VarDecl::Static:
+ return EmitStaticBlockVarDecl(D);
+ case VarDecl::Extern:
+ case VarDecl::PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ }
+
+ assert(0 && "Unknown storage class");
+}
+
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes
+ Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ std::string Name;
+ if (getContext().getLangOptions().CPlusPlus) {
+ Name = CGM.getMangledName(&D);
+ } else {
+ std::string ContextName;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl))
+ ContextName = CGM.getMangledName(FD);
+ else if (isa<ObjCMethodDecl>(CurFuncDecl))
+ ContextName = std::string(CurFn->getNameStart(),
+ CurFn->getNameStart() + CurFn->getNameLen());
+ else
+ assert(0 && "Unknown context for block var decl");
+
+ Name = ContextName + Separator + D.getNameAsString();
+ }
+
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ return new llvm::GlobalVariable(LTy, Ty.isConstant(getContext()), Linkage,
+ llvm::Constant::getNullValue(LTy), Name,
+ &CGM.getModule(), D.isThreadSpecified(),
+ Ty.getAddressSpace());
+}
+
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ llvm::GlobalVariable *GV =
+ CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = GV;
+
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVLASize(D.getType());
+
+ if (D.getType()->isReferenceType()) {
+ CGM.ErrorUnsupported(&D, "static declaration with reference type");
+ return;
+ }
+
+ if (D.getInit()) {
+ llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOptions().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else
+ GenerateStaticCXXBlockVarDeclInit(D, GV);
+ } else {
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
+ if (GV->getType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(Init->getType(), OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ &CGM.getModule(), D.isThreadSpecified(),
+ D.getType().getAddressSpace());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setInitializer(Init);
+ }
+ }
+
+ // FIXME: Merge attribute handling.
+ if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ llvm::Constant *Ann =
+ CGM.EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D.getLocation()));
+ CGM.AddAnnotation(Ann);
+ }
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(GV);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+ // RAUW's the GV uses of this constant will be invalid.
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+ const llvm::Type *LPtrTy =
+ llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+ DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+ }
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper;
+/// void *__destroy_helper;
+/// T x;
+/// } x
+///
+/// Align is the alignment needed in bytes for x.
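+///
+/// The copy and destroy helper slots only exist when the type needs
+/// copy/dispose, so 'x' lands at struct index 6 in that case and at index 4
+/// otherwise (matching the needsCopyDispose*2 + 4 indexing below).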
+const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
+ uint64_t Align) {
+ const llvm::Type *LTy = ConvertType(Ty);
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Types[0] = PtrToInt8Ty;
+ Types[1] = PtrToInt8Ty;
+ Types[2] = llvm::Type::Int32Ty;
+ Types[3] = llvm::Type::Int32Ty;
+ if (needsCopyDispose) {
+ Types[4] = PtrToInt8Ty;
+ Types[5] = PtrToInt8Ty;
+ }
+ // FIXME: Align this on at least an Align boundary.
+ Types[needsCopyDispose*2 + 4] = LTy;
+ return llvm::StructType::get(Types, false);
+}
+
+/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+ QualType Ty = D.getType();
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ bool needsDispose = false;
+
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ // A normal fixed sized variable becomes an alloca in the entry block.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ if (isByRef)
+ LTy = BuildByRefType(Ty, getContext().getDeclAlignInBytes(&D));
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getNameAsString().c_str());
+
+ if (isByRef)
+ Alloc->setAlignment(std::max(getContext().getDeclAlignInBytes(&D),
+ unsigned(Target.getPointerAlign(0) / 8)));
+ else
+ Alloc->setAlignment(getContext().getDeclAlignInBytes(&D));
+ DeclPtr = Alloc;
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ llvm::GlobalValue
+ ::InternalLinkage);
+ }
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ } else {
+ if (!DidCallStackSave) {
+ // Save the stack.
+ const llvm::Type *LTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ {
+ // Push a cleanup block and restore the stack there.
+ CleanupScope scope(*this);
+
+ V = Builder.CreateLoad(Stack, "tmp");
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ Builder.CreateCall(F, V);
+ }
+ }
+
+ // Get the element type.
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemPtrTy =
+ llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
+
+ llvm::Value *VLASize = EmitVLASize(Ty);
+
+ // Downcast the VLA size expression
+ VLASize = Builder.CreateIntCast(VLASize, llvm::Type::Int32Ty, false, "tmp");
+
+ // Allocate memory for the array.
+ llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::Int8Ty, VLASize, "vla");
+ DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ }
+ else if (isByRef) {
+ llvm::Value *Loc;
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc, false);
+ Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
+ Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
+ DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ // If this local has an initializer, emit it now.
+ if (const Expr *Init = D.getInit()) {
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef) {
+ bool needsCopyDispose = BlockRequiresCopying(Ty);
+ Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
+ }
+ if (Ty->isReferenceType()) {
+ llvm::Value *V = EmitReferenceBindingToExpr(Init, Ty).getScalarVal();
+ EmitStoreOfScalar(V, Loc, false, Ty);
+ } else if (!hasAggregateLLVMType(Init->getType())) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
+ D.getType());
+ } else if (Init->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
+ } else {
+ EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
+ }
+ }
+ if (isByRef) {
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
+ llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
+ llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
+ llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
+ llvm::Value *V;
+ int flag = 0;
+ int flags = 0;
+
+ needsDispose = true;
+
+ if (Ty->isBlockPointerType()) {
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ } else if (BlockRequiresCopying(Ty)) {
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ }
+
+ // FIXME: Someone double check this.
+ if (Ty.isObjCGCWeak())
+ flag |= BLOCK_FIELD_IS_WEAK;
+
+ int isa = 0;
+ if (flag&BLOCK_FIELD_IS_WEAK)
+ isa = 1;
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa);
+ V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
+ Builder.CreateStore(V, isa_field);
+
+ V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
+ Builder.CreateStore(V, forwarding_field);
+
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags);
+ Builder.CreateStore(V, flags_field);
+
+ const llvm::Type *V1;
+ V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
+ V = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ (CGM.getTargetData().getTypeStoreSizeInBits(V1)
+ / 8));
+ Builder.CreateStore(V, size_field);
+
+ if (flags & BLOCK_HAS_COPY_DISPOSE) {
+ BlockHasCopyDispose = true;
+ llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag),
+ copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag),
+ destroy_helper);
+ }
+ }
+
+ // Handle the cleanup attribute
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
+ assert(F && "Could not find function!");
+
+ CleanupScope scope(*this);
+
+ const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = Info.arg_begin()->type;
+ DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(DeclPtr),
+ getContext().getPointerType(D.getType())));
+
+ EmitCall(Info, F, Args);
+ }
+
+ if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+ CleanupScope scope(*this);
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V, false);
+ BuildBlockRelease(V);
+ }
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+ QualType Ty = D.getType();
+
+ llvm::Value *DeclPtr;
+ if (!Ty->isConstantSizeType()) {
+ // Variable sized values always are passed by-reference.
+ DeclPtr = Arg;
+ } else {
+ // A fixed sized single-value variable becomes an alloca in the entry block.
+ const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ if (LTy->isSingleValueType()) {
+ // TODO: Alignment
+ std::string Name = D.getNameAsString();
+ Name += ".addr";
+ DeclPtr = CreateTempAlloca(LTy);
+ DeclPtr->setName(Name.c_str());
+
+ // Store the initial value into the alloca.
+ EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
+ } else {
+ // Otherwise, if this is an aggregate, just use the input pointer.
+ DeclPtr = Arg;
+ }
+ Arg->setName(D.getNameAsString());
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
+ }
+}
+
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..c5f2387
--- /dev/null
+++ b/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,1324 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+ const char *Name) {
+ if (!Builder.isNamePreserving())
+ Name = "";
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ QualType BoolTy = getContext().BoolTy;
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which can have
+/// any type. The result is returned as an RValue struct. If this is an
+/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
+/// the result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+ bool isAggLocVolatile, bool IgnoreResult) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, false, false,
+ IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult);
+ return RValue::getAggregate(AggLoc, isAggLocVolatile);
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result
+/// will always be accessible even if no aggregate location is
+/// provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
+ bool isAggLocVolatile) {
+ if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
+ return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
+}
+
+RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+ QualType DestType) {
+ RValue Val;
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ // Emit the expr as an lvalue.
+ LValue LV = EmitLValue(E);
+ if (LV.isSimple())
+ return RValue::get(LV.getAddress());
+ Val = EmitLoadOfLValue(LV, E->getType());
+ } else {
+ Val = EmitAnyExprToTemp(E);
+ }
+
+ if (Val.isAggregate()) {
+ Val = RValue::get(Val.getAggregateAddr());
+ } else {
+ // Create a temporary variable that we can bind the reference to.
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
+ "reftmp");
+ if (Val.isScalar())
+ EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
+ else
+ StoreComplexToAddr(Val.getComplexVal(), Temp, false);
+ Val = RValue::get(Temp);
+ }
+
+ return Val;
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return
+/// the input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ if (isa<llvm::ConstantAggregateZero>(Elts))
+ return 0;
+
+ return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+}
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType()) {
+ return RValue::get(0);
+ } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
+ const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ } else if (hasAggregateLLVMType(Ty)) {
+ const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
+ return RValue::getAggregate(llvm::UndefValue::get(LTy));
+ } else {
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+ }
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return LValue::MakeAddr(llvm::UndefValue::get(Ty),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield
+/// reference. In either case, the LLVM Value* in the LValue structure is
+/// guaranteed to be an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of
+/// the LLVM value is known: For example, it may not be a pointer to an
+/// integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed
+/// size, this method guarantees that the returned pointer type will point to
+/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
+/// variable length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ case Expr::QualifiedDeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+
+ case Expr::BlockDeclRefExprClass:
+ return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+
+ case Expr::CXXConditionDeclExprClass:
+ return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E));
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::ObjCPropertyRefExprClass:
+ return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
+ case Expr::ObjCKVCRefExprClass:
+ return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E));
+ case Expr::ObjCSuperExprClass:
+ return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
+
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperator(cast<ConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+ }
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty) {
+ llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp");
+
+ // Bool can have a different representation in memory than in registers.
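+ // (Typically it is stored as i8 in memory but used as i1 in registers, so
+ // truncate back down to i1 here if needed.)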
+ if (Ty->isBooleanType())
+ if (V->getType() != llvm::Type::Int1Ty)
+ V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool");
+
+ return V;
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty) {
+
+ if (Ty->isBooleanType()) {
+ // Bool can have a different representation in memory than in registers.
+ const llvm::Type *SrcTy = Value->getType();
+ const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
+ if (DstPtr->getElementType() != SrcTy) {
+ const llvm::Type *MemTy =
+ llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
+ Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
+ }
+ }
+
+ Builder.CreateStore(Value, Addr, Volatile);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+/// this method emits the address of the lvalue, then loads the result as an
+/// rvalue, returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj);
+ return RValue::get(read_weak);
+ }
+
+ if (LV.isSimple()) {
+ llvm::Value *Ptr = LV.getAddress();
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+
+ // Simple scalar l-value.
+ if (EltTy->isSingleValueType())
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ ExprType));
+
+ assert(ExprType->isFunctionType() && "Unknown scalar value");
+ return RValue::get(Ptr);
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+ return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+
+ if (LV.isBitfield())
+ return EmitLoadOfBitfieldLValue(LV, ExprType);
+
+ if (LV.isPropertyRef())
+ return EmitLoadOfPropertyRefLValue(LV, ExprType);
+
+ assert(LV.isKVCRef() && "Unknown LValue type!");
+ return EmitLoadOfKVCRefLValue(LV, ExprType);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+ QualType ExprType) {
+ unsigned StartBit = LV.getBitfieldStartBit();
+ unsigned BitfieldSize = LV.getBitfieldSize();
+ llvm::Value *Ptr = LV.getBitfieldAddr();
+
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+ // In some cases the bitfield may straddle two memory locations.
+ // Currently we load the entire bitfield, then do the magic to
+ // sign-extend it if necessary. This results in somewhat more code
+ // than necessary for the common case (one load), since two shifts
+ // accomplish both the masking and sign extension.
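+ // For example, a 10-bit field starting at bit 28 of a 32-bit unit gets its
+ // low 4 bits from this load and the remaining 6 bits from the second load
+ // below.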
+ unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+ llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");
+
+ // Shift to proper location.
+ if (StartBit)
+ Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
+ "bf.lo");
+
+ // Mask off unused bits.
+ llvm::Constant *LowMask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
+ Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
+
+ // Fetch the high bits if necessary.
+ if (LowBits < BitfieldSize) {
+ unsigned HighBits = BitfieldSize - LowBits;
+ llvm::Value *HighPtr =
+ Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
+ "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ LV.isVolatileQualified(),
+ "tmp");
+
+ // Mask off unused bits.
+ llvm::Constant *HighMask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+ HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
+
+ // Shift to proper location and or in to bitfield value.
+ HighVal = Builder.CreateShl(HighVal,
+ llvm::ConstantInt::get(EltTy, LowBits));
+ Val = Builder.CreateOr(Val, HighVal, "bf.val");
+ }
+
+ // Sign extend if necessary.
+ if (LV.isBitfieldSigned()) {
+ llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
+ EltTySize - BitfieldSize);
+ Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ // The bitfield type and the normal type differ when the storage sizes
+ // differ (currently just _Bool).
+ Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");
+
+ return RValue::get(Val);
+}
+
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getPropertyRefExpr());
+}
+
+RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
+ QualType ExprType) {
+ return EmitObjCPropertyGet(LV.getKVCRefExpr());
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
+ QualType ExprType) {
+ llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified(), "tmp");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be
+ // extracting a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = ExprType->getAsVectorType();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
+ }
+
+ // Always use shuffle vector to try to retain the original program structure
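+ // For example, reading '.yx' from a 4-element vector builds the mask {1, 0}
+ // and shuffles the loaded vector down to a 2-element result.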
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ return RValue::get(Vec);
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ QualType Ty) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+
+ if (Dst.isBitfield())
+ return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+
+ if (Dst.isPropertyRef())
+ return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
+
+ if (Dst.isKVCRef())
+ return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+
+ assert(0 && "Unknown LValue type");
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+ // Assign to a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+ // Assign to a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+#if 0
+ // FIXME: We cannot positively determine if we have an 'ivar' assignment,
+ // an object assignment or an unknown assignment. For now, generate a call
+ // to objc_assign_strongCast, which is a safe but conservative assumption.
+ if (Dst.isObjCIvar())
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
+ else
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+#endif
+ if (Dst.isGlobalObjCRef())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
+ Dst.isVolatileQualified(), Ty);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ QualType Ty,
+ llvm::Value **Result) {
+ unsigned StartBit = Dst.getBitfieldStartBit();
+ unsigned BitfieldSize = Dst.getBitfieldSize();
+ llvm::Value *Ptr = Dst.getBitfieldAddr();
+
+ const llvm::Type *EltTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+ // Get the new value, cast to the appropriate type and masked to
+ // exactly the size of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+ llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
+ llvm::Constant *Mask =
+ llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
+ NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ const llvm::Type *SrcTy = SrcVal->getType();
+ llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Dst.isBitfieldSigned()) {
+ unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
+ llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
+ SrcTySize - BitfieldSize);
+ SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = SrcTrunc;
+ }
+
+ // In some cases the bitfield may straddle two memory locations.
+ // Emit the low part first, then check whether a high part needs to be
+ // written as well.
+ unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+ llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
+ "bf.prev.low");
+
+ // Compute the mask for zero-ing the low part of this bitfield.
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
+ StartBit + LowBits));
+
+ // Compute the new low part as
+ // LowVal = (LowVal & InvMask) | (NewVal << StartBit),
+ // with the shift of NewVal implicitly stripping the high bits.
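+ // For example, with StartBit 28 in a 32-bit unit, only the low 4 bits of a
+ // 10-bit field fit here; the remaining 6 bits are written by the high-part
+ // store below.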
+ llvm::Value *NewLowVal =
+ Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
+ "bf.value.lo");
+ LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
+ LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
+
+ // Write back.
+ Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());
+
+ // If the low part doesn't cover the bitfield emit a high part.
+ if (LowBits < BitfieldSize) {
+ unsigned HighBits = BitfieldSize - LowBits;
+ llvm::Value *HighPtr =
+ Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
+ "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ Dst.isVolatileQualified(),
+ "bf.prev.hi");
+
+ // Compute the mask for zero-ing the high part of this bitfield.
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+
+ // Compute the new high part as
+ // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
+ // where the high bits of NewVal have already been cleared and the
+ // shift strips the low bits.
+ llvm::Value *NewHighVal =
+ Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
+ "bf.value.high");
+ HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
+ HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
+
+ // Write back.
+ Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst,
+ QualType Ty) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified(), "tmp");
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Ty->getAsVectorType()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+ // Use a shufflevector if the source and destination have the same number
+ // of elements.
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned InIdx = getAccessedFieldNo(i, Elts);
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
+ }
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV, "tmp");
+ }
+ else if (NumDstElts > NumSrcElts) {
+ // Extend the source vector to the same length, then shuffle it
+ // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
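+ // For example, writing a 2-element value into '.xz' of a 4-element vector
+ // widens the source with mask {0, 1, undef, undef} and then shuffles with
+ // mask {4, 1, 5, 3} to splice the new elements into positions 0 and 2.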
+ llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+ unsigned i;
+ for (i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ for (; i != NumDstElts; ++i)
+ ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
+ ExtMask.size());
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV, "tmp");
+ // build identity
+ llvm::SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i) {
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ }
+ // then overwrite the entries that get shuffled in from the source
+ for (unsigned i = 0; i != NumSrcElts; ++i) {
+ unsigned Idx = getAccessedFieldNo(i, Elts);
+ Mask[Idx] =llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
+ }
+ llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+ }
+ else {
+ // We should never shorten the vector.
+ assert(0 && "unexpected shortening of vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+ }
+
+ Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
+
+ if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
+ isa<ImplicitParamDecl>(VD))) {
+ LValue LV;
+ bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+ if (VD->hasExternalStorage()) {
+ llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ else {
+ llvm::Value *V = LocalDeclMap[VD];
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+ // Local variables do not get their GC attribute set.
+ QualType::GCAttrTypes attr = QualType::GCNone;
+ // Local statics and __block variables, however, still do.
+ if (!NonGCable)
+ attr = getContext().getObjCGCAttrKind(E->getType());
+ if (VD->hasAttr<BlocksAttr>()) {
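+ // A __block variable is accessed through its byref structure: follow the
+ // forwarding pointer, then index to the field that holds the variable
+ // itself (field 4, or 6 when copy/dispose helpers are present).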
+ bool needsCopyDispose = BlockRequiresCopying(VD->getType());
+ const llvm::Type *PtrStructTy = V->getType();
+ const llvm::Type *Ty = PtrStructTy;
+ Ty = llvm::PointerType::get(Ty, 0);
+ V = Builder.CreateStructGEP(V, 1, "forwarding");
+ V = Builder.CreateBitCast(V, Ty);
+ V = Builder.CreateLoad(V, false);
+ V = Builder.CreateBitCast(V, PtrStructTy);
+ V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ }
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
+ }
+ LValue::SetObjCNonGC(LV, NonGCable);
+ return LV;
+ } else if (VD && VD->isFileVarDecl()) {
+ llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+ LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ if (LV.isObjCStrong())
+ LValue::SetGlobalObjCRef(LV, true);
+ return LV;
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
+ llvm::Value* V = CGM.GetAddrOfFunction(GlobalDecl(FD));
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAsFunctionProtoType()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
+ QualType NoProtoType =
+ getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = getContext().getPointerType(NoProtoType);
+ V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
+ }
+ }
+ return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ else if (const ImplicitParamDecl *IPD =
+ dyn_cast<ImplicitParamDecl>(E->getDecl())) {
+ llvm::Value *V = LocalDeclMap[IPD];
+ assert(V && "BlockVarDecl not entered in LocalDeclMap?");
+ return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+ assert(0 && "Unimp declref");
+ // Return an invalid LValue; the assert above ensures this point is never
+ // reached.
+ return LValue();
+}
+
+LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
+ return LValue::MakeAddr(GetAddrOfBlockDecl(E),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UnaryOperator::Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: assert(0 && "Unknown unary operator lvalue!");
+ case UnaryOperator::Deref:
+ {
+ QualType T =
+ E->getSubExpr()->getType()->getAsPointerType()->getPointeeType();
+ LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
+ ExprTy->getAsPointerType()->getPointeeType()
+ .getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(T));
+ // We should not generate a __weak write barrier on an indirect reference
+ // to a pointer to an object, as in: void foo(__weak id *param); *param = 0;
+ // However, we continue to generate a __strong write barrier on an indirect
+ // write into a pointer to an object.
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ LValue LV = EmitLValue(E->getSubExpr());
+ unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
+ return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ ExprTy.getCVRQualifiers());
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
+}
+
+
+LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default:
+ assert(0 && "Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ // FIXME: Demangle C++ method names.
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
+
+ // FIXME: This isn't right at all. The logic for computing this should go
+ // into a method on PredefinedExpr. This would allow sema and codegen to be
+ // consistent for things like sizeof(__func__) etc.
+ std::string FunctionName;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
+ FunctionName = CGM.getMangledName(FD);
+ } else {
+ // Just get the mangled name, skipping the asm prefix if it
+ // exists.
+ FunctionName = CurFn->getName();
+ if (FunctionName[0] == '\01')
+ FunctionName = FunctionName.substr(1, std::string::npos);
+ }
+
+ GlobalVarName += FunctionName;
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return LValue::MakeAddr(C, 0);
+}
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction:
+ return EmitPredefinedFunctionName(E->getIdentType());
+ }
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ // FIXME: This should properly sign/zero/extend or truncate Idx to i32.
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType().getCVRQualifiers());
+ }
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+
+ // Extend or truncate the index to match the pointer width (32 or 64 bits).
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerType();
+ unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (IdxBitwidth != LLVMPointerWidth)
+ Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
+ IdxSigned, "idxprom");
+
+ // We know that the pointer points to a type of the correct size, unless the
+ // element type is a variable-length array or an Objective-C interface.
+ llvm::Value *Address = 0;
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(E->getType())) {
+ llvm::Value *VLASize = VLASizeMap[VAT];
+
+ Idx = Builder.CreateMul(Idx, VLASize);
+
+ QualType BaseType = getContext().getBaseElementType(VAT);
+
+ uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
+ Idx = Builder.CreateUDiv(Idx,
+ llvm::ConstantInt::get(Idx->getType(),
+ BaseTypeSize));
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ } else if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(E->getType())) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSize(OIT) / 8);
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
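+ // GEP over an i8* so Idx is interpreted as a byte offset, then cast the
+ // result back to the original pointer type.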
+ llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+ Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else {
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getAsPointerType()->getPointeeType();
+ LValue LV = LValue::MakeAddr(Address,
+ T.getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(T));
+ if (getContext().getLangOptions().ObjC1 &&
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
+ LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ return LV;
+}
+
+static
+llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));
+
+ return llvm::ConstantVector::get(&CElts[0], CElts.size());
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (!E->isArrow()) {
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ const PointerType *PT = E->getBase()->getType()->getAsPointerType();
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
+ }
+
+ // Encode the element access list into a vector of unsigned indices.
+ llvm::SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV,
+ Base.getQualifiers());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+ if (isa<llvm::ConstantAggregateZero>(BaseElts))
+ CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ else
+ CElts.push_back(BaseElts->getOperand(Indices[i]));
+ }
+ llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
+ Base.getQualifiers());
+}
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ bool isUnion = false;
+ bool isIvar = false;
+ bool isNonGC = false;
+ Expr *BaseExpr = E->getBase();
+ llvm::Value *BaseValue = NULL;
+ unsigned CVRQualifiers=0;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy =
+ BaseExpr->getType()->getAsPointerType();
+ if (PTy->getPointeeType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
+ } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
+ isa<ObjCKVCRefExpr>(BaseExpr)) {
+ RValue RV = EmitObjCPropertyGet(BaseExpr);
+ BaseValue = RV.getAggregateAddr();
+ if (BaseExpr->getType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isObjCIvar())
+ isIvar = true;
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ if (BaseExpr->getType()->isUnionType())
+ isUnion = true;
+ CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ }
+
+ FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
+ // FIXME: Handle non-field member expressions
+ assert(Field && "No code generation for non-field member references");
+ LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
+ CVRQualifiers);
+ LValue::SetObjCIvar(MemExpLV, isIvar);
+ LValue::SetObjCNonGC(MemExpLV, isNonGC);
+ return MemExpLV;
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
+ FieldDecl* Field,
+ unsigned CVRQualifiers) {
+ unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ // FIXME: CodeGenTypes should expose a method to get the appropriate type for
+ // FieldTy (the appropriate type is ABI-dependent).
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType *BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ BaseValue = Builder.CreateBitCast(BaseValue,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ llvm::Value *V = Builder.CreateGEP(BaseValue,
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
+ "tmp");
+
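+ // The bitfield info gives the bit offset (Begin) and width (Size) of the
+ // field within the unit just computed; loads and stores of this l-value
+ // go through the bitfield codegen path.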
+ CodeGenTypes::BitFieldInfo bitFieldInfo =
+ CGM.getTypes().getBitFieldInfo(Field);
+ return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
+ Field->getType()->isSignedIntegerType(),
+ Field->getType().getCVRQualifiers()|CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
+ FieldDecl* Field,
+ bool isUnion,
+ unsigned CVRQualifiers) {
+ if (Field->isBitField())
+ return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
+
+ unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+ // Match union field type.
+ if (isUnion) {
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(Field->getType());
+ const llvm::PointerType *BaseTy =
+ cast<llvm::PointerType>(BaseValue->getType());
+ unsigned AS = BaseTy->getAddressSpace();
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::get(FieldTy, AS),
+ "tmp");
+ }
+ if (Field->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "tmp");
+
+ QualType::GCAttrTypes attr = QualType::GCNone;
+ if (CGM.getLangOptions().ObjC1 &&
+ CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
+ QualType Ty = Field->getType();
+ attr = Ty.getObjCGCAttr();
+ if (attr != QualType::GCNone) {
+ // __weak attribute on a field is ignored.
+ if (attr == QualType::Weak)
+ attr = QualType::GCNone;
+ }
+ else if (getContext().isObjCObjectPointerType(Ty))
+ attr = QualType::Strong;
+ }
+ LValue LV =
+ LValue::MakeAddr(V,
+ Field->getType().getCVRQualifiers()|CVRQualifiers,
+ attr);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+ const llvm::Type *LTy = ConvertType(E->getType());
+ llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");
+
+ const Expr* InitExpr = E->getInitializer();
+ LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());
+
+ if (E->getType()->isComplexType()) {
+ EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
+ } else if (hasAggregateLLVMType(E->getType())) {
+ EmitAnyExpr(InitExpr, DeclPtr, false);
+ } else {
+ EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());
+ }
+
+ return Result;
+}
+
+LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
+ // We don't handle vectors yet.
+ if (E->getType()->isVectorType())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+
+}
+
+/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
+/// generator in an lvalue context, then it must mean that we need the address
+/// of an aggregate in order to access one of its fields. This can happen for
+/// all the reasons that casts with an aggregate result are permitted,
+/// including no-op aggregate casts and casts from scalar to union.
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ // If this is an aggregate-to-aggregate cast, just use the input's address as
+ // the lvalue.
+ if (getContext().hasSameUnqualifiedType(E->getType(),
+ E->getSubExpr()->getType()))
+ return EmitLValue(E->getSubExpr());
+
+ // Otherwise, we must have a cast from scalar to union.
+ assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");
+
+ // Casts are only lvalues when the source and destination types are the same.
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAnyExpr(E->getSubExpr(), Temp, false);
+
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
+ // Builtins never have block type, so it is safe to handle block calls first.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE);
+
+ const Decl *TargetDecl = 0;
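+ // Look through an implicit function-to-pointer cast to find the callee
+ // declaration, so that calls to builtins can be emitted directly.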
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
+ if (unsigned builtinID = FD->getBuiltinID(getContext()))
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(Callee, E->getCallee()->getType(),
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BinaryOperator::Comma) {
+ EmitAnyExpr(E->getLHS());
+ return EmitLValue(E->getRHS());
+ }
+
+ // We can only produce an l-value for binary operator expressions that are
+ // a simple assignment of aggregate type.
+ if (E->getOpcode() != BinaryOperator::Assign)
+ return EmitUnsupportedLValue(E, "binary l-value expression");
+
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+ // FIXME: Are these qualifiers correct?
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (RV.isScalar()) {
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+ }
+
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAggExpr(E, Temp, false);
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
+ EmitLocalBlockVarDecl(*E->getVarDecl());
+ return EmitDeclRefLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
+ EmitCXXConstructExpr(Temp, E);
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ PushCXXTemporary(E->getTemporary(), LV.getAddress());
+
+ return LV;
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ // Can only get an l-value for a message expression returning aggregate type.
+ RValue RV = EmitObjCMessageExpr(E);
+ // FIXME: can this be volatile?
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ unsigned CVRQualifiers = 0;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAsPointerType();
+ ObjectTy = PTy->getPointeeType();
+ CVRQualifiers = ObjectTy.getCVRQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ CVRQualifiers = ObjectTy.getCVRQualifiers();
+ }
+
+ return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
+}
+
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+ return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or
+ // store through it.
+ return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
+}
+
+LValue
+CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+ return EmitUnsupportedLValue(E, "use of super");
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+ // Can only get an l-value for a statement expression returning aggregate
+ // type.
+ RValue RV = EmitAnyExprToTemp(E);
+ // FIXME: can this be volatile?
+ return LValue::MakeAddr(RV.getAggregateAddr(),
+ E->getType().getCVRQualifiers(),
+ getContext().getObjCGCAttrKind(E->getType()));
+}
+
+
+RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type here is always a pointer
+ // to function type; block calls are routed through EmitBlockCallExpr.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
+ QualType ResultType = FnType->getAsFunctionType()->getResultType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ Callee, Args, TargetDecl);
+}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..469c830
--- /dev/null
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,554 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ llvm::Value *DestPtr;
+ bool VolatileDest;
+ bool IgnoreResult;
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder),
+ DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+ /// represents an lvalue, this method emits the address of the lvalue,
+ /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCStyleCastExpr(CStyleCastExpr *E);
+ void VisitImplicitCastExpr(ImplicitCastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);
+
+ void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address, QualType T);
+ // case Expr::ChooseExprClass:
+
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If the result is ignored, don't copy from the value.
+ if (DestPtr == 0) {
+ if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
+ return;
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ DestPtr = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), "agg.tmp");
+ }
+
+ // If the result of the assignment is used, copy the LHS there also.
+ // FIXME: Pass VolatileDest as well. I think we also need to merge the
+ // volatile flag from the source, since we can't eliminate the copy if
+ // either operand is volatile, unless the copy is volatile for both the
+ // source and the destination.
+ CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
+ VolatileDest|Src.isVolatileQualified());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
+ Src.isVolatileQualified()),
+ Ignore);
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ // GCC union extension
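+ // A cast to union type stores the operand into the union's first field.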
+ if (E->getType()->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr,
+ *SD->field_begin(CGF.getContext()),
+ true, 0);
+ EmitInitializationToLValue(E->getSubExpr(), FieldLoc);
+ return;
+ }
+
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ RValue RV = CGF.EmitObjCPropertyGet(E);
+ EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
+ CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // We have to special case property setters, otherwise we must have
+ // a simple lvalue (no aggregates inside vectors, bitfields).
+ if (LHS.isPropertyRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ }
+ else if (LHS.isKVCRef()) {
+ llvm::Value *AggLoc = DestPtr;
+ if (!AggLoc)
+ AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
+ CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ RValue::getAggregate(AggLoc, VolatileDest));
+ } else {
+ // Codegen the RHS so that it stores directly into the LHS.
+ CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified());
+ EmitFinalDestCopy(E, LHS, true);
+ }
+}
+
+void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
+ Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+ // The GNU ?: extension with a missing LHS cannot have aggregate type (the
+ // LHS doubles as the condition), so an LHS must be present here.
+ assert(E->getLHS() && "Must have LHS for aggregate value");
+
+ Visit(E->getLHS());
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ Visit(E->getRHS());
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, 0));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
+
+ // FIXME: volatile
+ CGF.EmitAggExpr(E->getSubExpr(), Val, false);
+ } else
+ Visit(E->getSubExpr());
+
+ CGF.PushCXXTemporary(E->getTemporary(), Val);
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ llvm::Value *Val = DestPtr;
+
+ if (!Val) {
+ // Create a temporary variable.
+ Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
+ }
+
+ CGF.EmitCXXConstructExpr(Val, E);
+}
+
+void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ CGF.EmitCXXExprWithTemporaries(E, DestPtr, VolatileDest);
+}
+
+void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV, E->getType());
+ } else if (E->getType()->isComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(E->getType())) {
+ CGF.EmitAnyExpr(E, LV.getAddress(), false);
+ } else {
+ CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, E->getType());
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
+ if (!CGF.hasAggregateLLVMType(T)) {
+ // For non-aggregates, we can store zero
+ llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
+ CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
+ } else {
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers are guaranteed to have a
+ // bit pattern of all zeros.
+ // FIXME: That isn't true for member pointers!
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitMemSetToZero(LV.getAddress(), T);
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Disabled while we figure out what to do about
+ // test/CodeGen/bitfield.c
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ // FIXME: Should we really be doing this? Should we try to avoid cases where
+ // we emit a global with a lot of zeros? Should we try to avoid short
+ // globals?
+ if (E->isConstantInitializer(CGF.getContext(), 0)) {
+ llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, &CGF);
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ C, "", &CGF.CGM.getModule(), 0);
+ EmitFinalDestCopy(E, LValue::MakeAddr(GV, 0));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ const llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ uint64_t NumInitElements = E->getNumInits();
+
+ if (E->getNumInits() > 0) {
+ QualType T1 = E->getType();
+ QualType T2 = E->getInit(0)->getType();
+ if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
+ EmitAggLoadOfLValue(E->getInit(0));
+ return;
+ }
+ }
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
+ ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
+
+ unsigned CVRqualifier = ElementType.getCVRQualifiers();
+
+ for (uint64_t i = 0; i != NumArrayElements; ++i) {
+ llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
+ if (i < NumInitElements)
+ EmitInitializationToLValue(E->getInit(i),
+ LValue::MakeAddr(NextVal, CVRqualifier));
+ else
+ EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, CVRqualifier),
+ ElementType);
+ }
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+ // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
+ unsigned CurInitVal = 0;
+
+ if (E->getType()->isUnionType()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+ // Make sure that it's really an empty union and not a failure of
+ // semantic analysis.
+ for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
+ FieldEnd = SD->field_end(CGF.getContext());
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr, Field, true, 0);
+
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ } else {
+ // Default-initialize to null
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+
+ return;
+ }
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()),
+ FieldEnd = SD->field_end(CGF.getContext());
+ Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // FIXME: volatility
+ LValue FieldLoc = CGF.EmitLValueForField(DestPtr, *Field, false, 0);
+ // We never generate write barriers for initialized fields.
+ LValue::SetObjCNonGC(FieldLoc, true);
+ if (CurInitVal < NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc);
+ } else {
+ // We're out of initializers; default-initialize to null.
+ EmitNullInitializationToLValue(FieldLoc, Field->getType());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type. The result is computed into DestPtr. Note that if DestPtr is null,
+/// the value of the aggregate expression is not needed. If VolatileDest is
+/// true, DestPtr cannot be 0.
+void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
+ bool VolatileDest, bool IgnoreResult) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+ assert((DestPtr != 0 || VolatileDest == false) &&
+ "volatile aggregate can't be 0");
+
+ AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult)
+ .Visit(const_cast<Expr*>(E));
+}
+
+void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ EmitMemSetToZero(DestPtr, Ty);
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+ // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+ Builder.CreateCall4(CGM.getMemCpyFn(),
+ DestPtr, SrcPtr,
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ TypeInfo.second/8));
+}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..41fb725
--- /dev/null
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,663 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace {
+class VISIBILITY_HIDDEN ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ // True if we should ignore the value of 'a'.
+ bool IgnoreReal;
+ bool IgnoreImag;
+ // True if we should ignore the value of 'a = b'.
+ bool IgnoreRealAssign;
+ bool IgnoreImagAssign;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+ bool irn=false, bool iin=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+ IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+ bool TestAndClearIgnoreRealAssign() {
+ bool I = IgnoreRealAssign;
+ IgnoreRealAssign = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImagAssign() {
+ bool I = IgnoreImagAssign;
+ IgnoreImagAssign = false;
+ return I;
+ }
+
+ /// EmitLoadOfLValue - Given an expression with complex type that represents
+ /// an l-value, this method emits the address of the l-value, then loads
+ /// and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return ComplexPairTy();
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre);
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+ }
+ ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAsComplexType()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAsComplexType()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+/// the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::SmallString<64> Name(SrcPtr->getNameStart(),
+ SrcPtr->getNameStart()+SrcPtr->getNameLen());
+
+ llvm::Value *Real=0, *Imag=0;
+
+ if (!IgnoreReal) {
+ Name += ".realp";
+ llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0, Name.c_str());
+
+ Name.pop_back(); // .realp -> .real
+ Real = Builder.CreateLoad(RealPtr, isVolatile, Name.c_str());
+ Name.resize(Name.size()-4); // Strip "real"; "imagp" is appended below.
+ }
+
+ if (!IgnoreImag) {
+ Name += "imagp";
+
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, Name.c_str());
+
+ Name.pop_back(); // .imagp -> .imag
+ Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.c_str());
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAsComplexType()->getElementType();
+ DestType = DestType->getAsComplexType()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+ // Two cases here: cast from (complex to complex) and (scalar to complex).
+ if (Op->getType()->isAnyComplexType())
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+ // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+ // real part of the complex result value is determined by the rules of
+ // conversion to the corresponding real type and the imaginary part of the
+ // complex result value is a positive zero or an unsigned zero.
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAsComplexType()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+ } else {
+ QualType ElemTy = E->getType()->getAsComplexType()->getElementType();
+ llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(FVal);
+ }
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResR = Builder.CreateNeg(Op.first, "neg.r");
+ llvm::Value *ResI = Builder.CreateNeg(Op.second, "neg.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ // ~(a+ib) = a + i*-b
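+  // (This is the GNU extension that applies '~' to a complex operand to form
+  // the complex conjugate.)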
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI = Builder.CreateNeg(Op.second, "conj.i");
+ return ComplexPairTy(Op.first, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ llvm::Value *ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ llvm::Value *ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
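+  // (a+ib) * (c+id) = (ac-bd) + i(bc+ad)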
+ llvm::Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ llvm::Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ llvm::Value *ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ llvm::Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ llvm::Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ llvm::Value *ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
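+  // (obtained by multiplying numerator and denominator by the conjugate c-id;
+  // this is the straightforward textbook form, with no scaling to guard
+  // against intermediate overflow)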
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+ llvm::Value *DSTr, *DSTi;
+ if (Tmp3->getType()->isFloatingPoint()) {
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
+// Compound assignments.
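+// For "lhs op= rhs" the RHS is emitted first, the LHS is then loaded and
+// widened to the computation type, the operator is applied, and the result is
+// truncated back to the LHS type and stored.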
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little. It is possible for the RHS to be complex or
+ // scalar.
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+
+ LValue LHSLV = CGF.EmitLValue(E->getLHS());
+
+
+ // We know the LHS is a complex lvalue.
+ OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(),LHSLV.isVolatileQualified());
+ OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ bool ignreal = TestAndClearIgnoreRealAssign();
+ bool ignimag = TestAndClearIgnoreImagAssign();
+ assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
+ CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+ "Invalid assignment");
+ // Emit the RHS.
+ ComplexPairTy Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store into it.
+ EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+ // And now return the LHS
+ IgnoreReal = ignreal;
+ IgnoreImag = ignimag;
+ IgnoreRealAssign = ignreal;
+ IgnoreImagAssign = ignimag;
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ TestAndClearIgnoreRealAssign();
+ TestAndClearIgnoreImagAssign();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
+ Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+
+  // The GNU "?:" extension (omitted LHS) is not supported for complex values;
+  // the LHS must always be present here.
+  assert(E->getLHS() && "Must have LHS for complex value");
+
+ ComplexPairTy LHS = Visit(E->getLHS());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ ComplexPairTy RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+ RealPN->reserveOperandSpace(2);
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+ ImagPN->reserveOperandSpace(2);
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
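+  // __builtin_choose_expr is resolved at compile time, so only the chosen
+  // sub-expression is ever emitted.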
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreReal();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ Ignore = TestAndClearIgnoreImag();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ if (E->getNumInits())
+ return Visit(E->getInit(0));
+
+  // Empty init list initializes to null.
+ QualType Ty = E->getType()->getAsComplexType()->getElementType();
+ const llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ // FIXME Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning the real/imag parts as a pair. The Ignore* flags
+/// indicate which parts of the result the caller does not need.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..b30bafb
--- /dev/null
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,588 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class VISIBILITY_HIDDEN ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ // GCC cast to union extension
+ if (E->getType()->isUnionType()) {
+ const llvm::Type *Ty = ConvertType(E->getType());
+ Expr *SubExpr = E->getSubExpr();
+ return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
+ Ty);
+ }
+ // Explicit and implicit no-op casts
+ QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+ if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
+ return Visit(E->getSubExpr());
+ }
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ std::vector<llvm::Constant*> Elts;
+ const llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ unsigned NumInitElements = ILE->getNumInits();
+ // FIXME: Check for wide strings
+ // FIXME: Check for NumInitElements exactly equal to 1??
+ if (NumInitElements > 0 &&
+ (isa<StringLiteral>(ILE->getInit(0)) ||
+ isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
+ ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
+ return Visit(ILE->getInit(0));
+ const llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Initialising an array requires us to automatically
+ // initialise any elements that have not been initialised explicitly
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ bool RewriteType = false;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ const llvm::StructType *SType = llvm::StructType::get(Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
+ FieldDecl* Field, Expr* E) {
+ // Calculate the value to insert
+ llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
+ if (!C)
+ return;
+
+ llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
+ if (!CI) {
+ CGM.ErrorUnsupported(E, "bitfield initialization");
+ return;
+ }
+ llvm::APInt V = CI->getValue();
+
+ // Calculate information about the relevant field
+ const llvm::Type* Ty = CI->getType();
+ const llvm::TargetData &TD = CGM.getTypes().getTargetData();
+ unsigned size = TD.getTypeAllocSizeInBits(Ty);
+ unsigned fieldOffset = CGM.getTypes().getLLVMFieldNo(Field) * size;
+ CodeGenTypes::BitFieldInfo bitFieldInfo =
+ CGM.getTypes().getBitFieldInfo(Field);
+ fieldOffset += bitFieldInfo.Begin;
+
+ // Find where to start the insertion
+ // FIXME: This is O(n^2) in the number of bit-fields!
+ // FIXME: This won't work if the struct isn't completely packed!
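+    // For example (assuming a hypothetical bit-field at bit offset 13 with
+    // width 6): the scan below stops at byte offset 8, the first 3 bits are
+    // OR'd into bits 5..7 of that byte, and the remaining 3 bits go into the
+    // low bits of the following byte.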
+ unsigned offset = 0, i = 0;
+ while (offset < (fieldOffset & -8))
+ offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());
+
+ // Advance over 0 sized elements (must terminate in bounds since
+ // the bitfield must have a size).
+ while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
+ ++i;
+
+ // Promote the size of V if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (bitFieldInfo.Size > V.getBitWidth())
+ V.zext(bitFieldInfo.Size);
+
+ // Insert the bits into the struct
+    // FIXME: This algorithm is only correct on X86!
+    // FIXME: This algorithm assumes bit-fields only have byte-sized elements!
+ unsigned bitsToInsert = bitFieldInfo.Size;
+ unsigned curBits = std::min(8 - (fieldOffset & 7), bitsToInsert);
+ unsigned byte = V.getLoBits(curBits).getZExtValue() << (fieldOffset & 7);
+ do {
+ llvm::Constant* byteC = llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
+ Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC);
+ ++i;
+ V = V.lshr(curBits);
+ bitsToInsert -= curBits;
+
+ if (!bitsToInsert)
+ break;
+
+ curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
+ byte = V.getLoBits(curBits).getZExtValue();
+ } while (true);
+ }
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ const llvm::StructType *SType =
+ cast<llvm::StructType>(ConvertType(ILE->getType()));
+ RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
+ std::vector<llvm::Constant*> Elts;
+
+ // Initialize the whole structure to zero.
+ // FIXME: This doesn't handle member pointers correctly!
+ for (unsigned i = 0; i < SType->getNumElements(); ++i) {
+ const llvm::Type *FieldTy = SType->getElementType(i);
+ Elts.push_back(llvm::Constant::getNullValue(FieldTy));
+ }
+
+ // Copy initializer elements. Skip padding fields.
+ unsigned EltNo = 0; // Element no in ILE
+ int FieldNo = 0; // Field no in RecordDecl
+ bool RewriteType = false;
+ for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()),
+ FieldEnd = RD->field_end(CGM.getContext());
+ EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
+ FieldNo++;
+ if (!Field->getIdentifier())
+ continue;
+
+ if (Field->isBitField()) {
+ InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
+ } else {
+ unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
+ llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
+ Field->getType(), CGF);
+ if (!C) return 0;
+ RewriteType |= (C->getType() != Elts[FieldNo]->getType());
+ Elts[FieldNo] = C;
+ }
+ EltNo++;
+ }
+
+ if (RewriteType) {
+ // FIXME: Make this work for non-packed structs
+ assert(SType->isPacked() && "Cannot recreate unpacked structs");
+ std::vector<const llvm::Type*> Types;
+ for (unsigned i = 0; i < Elts.size(); ++i)
+ Types.push_back(Elts[i]->getType());
+ SType = llvm::StructType::get(Types, true);
+ }
+
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
+ if (!C)
+ return 0;
+
+    // Build a struct with the union sub-element as the first member, padded
+    // out to the appropriate size.
+ std::vector<llvm::Constant*> Elts;
+ std::vector<const llvm::Type*> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+ while (CurSize < TotalSize) {
+ Elts.push_back(llvm::Constant::getNullValue(llvm::Type::Int8Ty));
+ Types.push_back(llvm::Type::Int8Ty);
+ CurSize++;
+ }
+
+ // This always generates a packed struct
+ // FIXME: Try to generate an unpacked struct when we can
+ llvm::StructType* STy = llvm::StructType::get(Types, true);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ const llvm::Type *Ty = ConvertType(ILE->getType());
+
+ FieldDecl* curField = ILE->getInitializedFieldInUnion();
+ if (!curField) {
+ // There's no field to initialize, so value-initialize the union.
+#ifndef NDEBUG
+      // Make sure that it's really empty and not a failure of
+      // semantic analysis.
+ RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
+ for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()),
+ FieldEnd = RD->field_end(CGM.getContext());
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return llvm::Constant::getNullValue(Ty);
+ }
+
+ if (curField->isBitField()) {
+ // Create a dummy struct for bit-field insertion
+ unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
+ llvm::Constant* NV = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
+ std::vector<llvm::Constant*> Elts(NumElts, NV);
+
+ InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
+ const llvm::ArrayType *RetTy =
+ llvm::ArrayType::get(NV->getType(), NumElts);
+ return llvm::ConstantArray::get(RetTy, Elts);
+ }
+
+ llvm::Constant *InitElem;
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ } else {
+ InitElem = CGM.EmitNullConstant(curField->getType());
+ }
+ return EmitUnion(InitElem, Ty);
+ }
+
+ llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
+ const llvm::VectorType *VType =
+ cast<llvm::VectorType>(ConvertType(ILE->getType()));
+ const llvm::Type *ElemTy = VType->getElementType();
+ std::vector<llvm::Constant*> Elts;
+ unsigned NumElements = VType->getNumElements();
+ unsigned NumInitElements = ILE->getNumInits();
+
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ for (; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ Elts.push_back(C);
+ }
+
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ return llvm::ConstantVector::get(VType, Elts);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isScalarType()) {
+ // We have a scalar in braces. Just use the first element.
+ if (ILE->getNumInits() > 0) {
+ Expr *Init = ILE->getInit(0);
+ return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ }
+ return CGM.EmitNullConstant(ILE->getType());
+ }
+
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isStructureType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ if (ILE->getType()->isVectorType())
+ return EmitVectorInitialization(ILE);
+
+ assert(0 && "Unable to handle InitListExpr");
+    // Silence the "control reaches end of non-void function" warning.
+    // Not reached.
+ return 0;
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // This must be a string initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantArray::get(Str, false);
+ }
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ const llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ llvm::Constant *EmitLValue(Expr *E) {
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = Visit(CLE->getInitializer());
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(C->getType(),
+ E->getType().isConstQualified(),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", &CGM.getModule());
+ return C;
+ }
+ case Expr::DeclRefExprClass:
+ case Expr::QualifiedDeclRefExprClass: {
+ NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(GlobalDecl(FD));
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isBlockVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ break;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ // __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
+ std::string Str;
+ if (cast<PredefinedExpr>(E)->getIdentType() ==
+ PredefinedExpr::PrettyFunction)
+ Str = "top level";
+
+ return CGM.GetAddrOfConstantCString(Str, ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
+ return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall(CGM.getContext()) !=
+ Builtin::BI__builtin___CFStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
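+// EmitConstantExpr first tries the AST-level constant evaluator
+// (Expr::Evaluate / Expr::EvaluateAsLValue) and translates the resulting
+// APValue; only if that fails does it fall back to the syntactic
+// ConstExprEmitter above.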
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->Evaluate(Result, Context);
+
+ if (Success) {
+ assert(!Result.HasSideEffects &&
+ "Constant expr should not have any side effects!");
+ switch (Result.Val.getKind()) {
+ case APValue::Uninitialized:
+ assert(0 && "Constant expressions should be initialized.");
+ return 0;
+ case APValue::LValue: {
+ const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ Result.Val.getLValueOffset());
+
+ llvm::Constant *C;
+ if (const Expr *LVBase = Result.Val.getLValueBase()) {
+ C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
+
+ // Apply offset if necessary.
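+        // (the base is bitcast to i8* so the GEP applies a byte offset, then
+        // the result is cast back to the original pointer type)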
+ if (!Offset->isNullValue()) {
+ const llvm::Type *Type =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int: {
+ llvm::Constant *C = llvm::ConstantInt::get(Result.Val.getInt());
+
+ if (C->getType() == llvm::Type::Int1Ty) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+ }
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(Result.Val.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(Result.Val.getComplexIntImag());
+
+ return llvm::ConstantStruct::get(Complex, 2);
+ }
+ case APValue::Float:
+ return llvm::ConstantFP::get(Result.Val.getFloat());
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(Result.Val.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(Result.Val.getComplexFloatImag());
+
+ return llvm::ConstantStruct::get(Complex, 2);
+ }
+ case APValue::Vector: {
+ llvm::SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Result.Val.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ APValue &Elt = Result.Val.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(&Inits[0], Inits.size());
+ }
+ }
+ }
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType() == llvm::Type::Int1Ty) {
+ const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ // Always return an LLVM null constant for now; this will change when we
+ // get support for IRGen of member pointers.
+ return llvm::Constant::getNullValue(getTypes().ConvertType(T));
+}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..950e9e5
--- /dev/null
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,1575 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
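+// BinOpInfo bundles the already-emitted LHS/RHS values with the computation
+// type and the originating BinaryOperator for use by the Emit* helpers below.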
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ const BinaryOperator *E;
+};
+
+namespace {
+class VISIBILITY_HIDDEN ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+    bool I = IgnoreResultAssign;
+    IgnoreResultAssign = false;
+    return I;
+  }
+
+ const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV, QualType T) {
+ return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+ }
+
+  /// EmitLoadOfLValue - Given an expression with a scalar type that represents
+  /// an l-value, this method emits the address of the l-value, then loads and
+  /// returns the result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitLValue(E), E->getType());
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination
+ /// type is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ assert(0 && "Stmt can't have complex result type!");
+ return 0;
+ }
+ Value *VisitExpr(Expr *S);
+ Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return llvm::ConstantInt::get(E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),
+ CGF.getContext().typesAreCompatible(
+ E->getArgType1(), E->getArgType2()));
+ }
+ Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V =
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ CGF.GetIDForAddrOfLabel(E->getLabel()));
+
+ return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
+ return llvm::ConstantInt::get(EC->getInitVal());
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); }
+ Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
+ return EmitLValue(E).getAddress();
+ }
+
+ Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
+
+ Value *VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+ assert (Ignore == false && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator()) {
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ }
+
+ const llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ // We have a scalar in braces. Just use the first element.
+ if (!VType)
+ return Visit(E->getInit(0));
+
+ unsigned NumVectorElements = VType->getNumElements();
+ const llvm::Type *ElementType = VType->getElementType();
+
+ // Emit individual vector element stores.
+ llvm::Value *V = llvm::UndefValue::get(VType);
+
+ // Emit initializers
+ unsigned i;
+ for (i = 0; i < NumInitElements; ++i) {
+ Value *NewV = Visit(E->getInit(i));
+ Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ V = Builder.CreateInsertElement(V, NewV, Idx);
+ }
+
+ // Emit remaining default initializers
+ for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
+ Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ llvm::Value *NewV = llvm::Constant::getNullValue(ElementType);
+ V = Builder.CreateInsertElement(V, NewV, Idx);
+ }
+
+ return V;
+ }
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
+ Value *VisitCastExpr(const CastExpr *E) {
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVLASize(E->getType());
+
+ return EmitCastExpr(E->getSubExpr(), E->getType());
+ }
+ Value *EmitCastExpr(const Expr *E, QualType T);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
+
+ // Unary Operators.
+ Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+ // This differs from gcc, though, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryOffsetOf(const UnaryOperator *E);
+
+ // C++
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+ return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+
+ // Binary Operators.
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
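+  // HANDLEBINOP(Mul) expands to VisitBinMul and VisitBinMulAssign, pairing
+  // each arithmetic operator with its compound-assignment form.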
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul);
+ HANDLEBINOP(Div);
+ HANDLEBINOP(Rem);
+ HANDLEBINOP(Add);
+ HANDLEBINOP(Sub);
+ HANDLEBINOP(Shl);
+ HANDLEBINOP(Shr);
+ HANDLEBINOP(And);
+ HANDLEBINOP(Xor);
+ HANDLEBINOP(Or);
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT);
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT);
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE);
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE);
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitConditionalOperator(const ConditionalOperator *CO);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType()) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ }
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ // Because of the type rules of C, we often end up computing a logical value,
+ // then zero extending it to int, then wanting it as a logical value again.
+ // Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
+ if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ // Compare against an integer or pointer null.
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ const llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (Src->getType() == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted
+ // to/from other pointers and integers. Check for pointer types in
+ // terms of LLVM, as some native types (like Obj-C id) may map to a
+ // pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(Src->getType()))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ bool InputSigned = SrcType->isSignedIntegerType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(Src->getType())) {
+    // Must be a ptr-to-int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
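+  // (e.g. converting a float to a 4-element ext-vector emits an insertelement
+  // into lane 0 followed by a shufflevector with an all-zero mask)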
+ if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAsExtVectorType()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+ // Splat the element across to all elements
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ for (unsigned i = 0; i < NumElements; i++)
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(Src->getType()) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ if (isa<llvm::IntegerType>(Src->getType())) {
+ bool InputSigned = SrcType->isSignedIntegerType();
+ if (isa<llvm::IntegerType>(DstTy))
+ return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ return Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ return Builder.CreateUIToFP(Src, DstTy, "conv");
+ }
+
+ assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
+ if (isa<llvm::IntegerType>(DstTy)) {
+ if (DstType->isSignedIntegerType())
+ return Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPToUI(Src, DstTy, "conv");
+ }
+
+ assert(DstTy->isFloatingPoint() && "Unknown real conversion");
+ if (DstTy->getTypeID() < Src->getType()->getTypeID())
+ return Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ return Builder.CreateFPExt(Src, DstTy, "conv");
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination
+/// type is an LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAsComplexType()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+  // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ llvm::SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
+ }
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+ Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue contexts. For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+ Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
+ "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
+/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
+/// also handle things like function to pointer-to-function decay, and array to
+/// pointer decay.
+Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ const Expr *Op = E->getSubExpr();
+
+ // If this is due to array->pointer conversion, emit the array expression as
+ // an l-value.
+ if (Op->getType()->isArrayType()) {
+ Value *V = EmitLValue(Op).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!Op->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ // The resultant pointer type can be implicitly casted to other pointer
+ // types as well (e.g. void*) and can be implicitly converted to integer.
+ const llvm::Type *DestTy = ConvertType(E->getType());
+ if (V->getType() != DestTy) {
+ if (isa<llvm::PointerType>(DestTy))
+ V = Builder.CreateBitCast(V, DestTy, "ptrconv");
+ else {
+ assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
+ V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
+ }
+ }
+ return V;
+ }
+
+ return EmitCastExpr(Op, E->getType());
+}
+
+
+// EmitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+  // Handle cases where the source is a non-complex type.
+
+ if (!CGF.hasAggregateLLVMType(E->getType())) {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // Use EmitScalarConversion to perform the conversion.
+ return EmitScalarConversion(Src, E->getType(), DestTy);
+ }
+
+ if (E->getType()->isAnyComplexType()) {
+ // Handle cases where the source is a complex type.
+ bool IgnoreImag = true;
+ bool IgnoreImagAssign = true;
+ bool IgnoreReal = IgnoreResultAssign;
+ bool IgnoreRealAssign = IgnoreResultAssign;
+ if (DestTy->isBooleanType())
+ IgnoreImagAssign = IgnoreImag = false;
+ else if (DestTy->isVoidType()) {
+ IgnoreReal = IgnoreImag = false;
+ IgnoreRealAssign = IgnoreImagAssign = true;
+ }
+ CodeGenFunction::ComplexPairTy V
+ = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+ IgnoreImagAssign);
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ // Okay, this is a cast from an aggregate. It must be a cast to void. Just
+ // evaluate the result and return.
+ CGF.EmitAggExpr(E, 0, false, true);
+ return 0;
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType()).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+ return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ValTy = E->getSubExpr()->getType();
+ Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
+
+ int AmountVal = isInc ? 1 : -1;
+
+ if (ValTy->isPointerType() &&
+ ValTy->getAsPointerType()->isVariableArrayType()) {
+ // The amount of the addition/subtraction needs to account for the VLA size
+ CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
+ }
+
+ Value *NextVal;
+ if (const llvm::PointerType *PT =
+ dyn_cast<llvm::PointerType>(InVal->getType())) {
+ llvm::Constant *Inc =llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
+ if (!isa<llvm::FunctionType>(PT->getElementType())) {
+ NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
+ } else {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+ NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+ NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+ }
+ } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
+ // Bool++ is an interesting case: due to promotion rules, we get
+ // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+ // Bool = ((int)Bool+1) != 0
+ // so the result of an increment is always true; decrement does not have
+ // this property.
+ NextVal = llvm::ConstantInt::getTrue();
+ } else {
+ // Add the inc/dec amount to the value.
+ if (isa<llvm::IntegerType>(InVal->getType()))
+ NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+ else if (InVal->getType() == llvm::Type::FloatTy)
+ NextVal =
+ llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType() == llvm::Type::DoubleTy)
+ NextVal =
+ llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
+ else {
+ llvm::APFloat F(static_cast<float>(AmountVal));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ NextVal = llvm::ConstantFP::get(F);
+ }
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitfield())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
+ &NextVal);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? NextVal : InVal;
+}
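+
+// For illustration (approximate IR): "p++" for an "int *p" becomes a GEP by
+// one element,
+//   %ptrincdec = getelementptr i32* %p.val, i32 1
+// while "b++" for a C _Bool stores true unconditionally, since
+// ((int)b + 1) != 0 holds whether b was 0 or 1.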
+
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNeg(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
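+
+// For illustration: "!x" for an "int x" is emitted approximately as
+//   %tobool   = icmp ne i32 %x, 0
+//   %lnot     = xor i1 %tobool, true
+//   %lnot.ext = zext i1 %lnot to i32
+// since the C result type of '!' is int, not a 1-bit boolean.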
+
+/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of the
+/// argument of the sizeof/alignof expression as an integer.
+Value *
+ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->isSizeOf()) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVLASize(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of VLA type, it is
+ // evaluated.
+ CGF.EmitAnyExpr(E->getArgumentExpr());
+ }
+
+ return CGF.GetVLASize(VAT);
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the
+ // constant folding logic so we don't have to duplicate it here.
+ Expr::EvalResult Result;
+ E->Evaluate(Result, CGF.getContext());
+ return llvm::ConstantInt::get(Result.Val.getInt());
+}
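+
+// For illustration: in "void f(int n) { int vla[n]; sizeof(vla); }" the
+// result above is the dynamically computed VLA size in bytes retrieved via
+// GetVLASize, whereas "sizeof(int[4])" is folded by Evaluate() to a plain
+// constant (16 when int is 4 bytes).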
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, false, true, false, true).first;
+ return Visit(Op);
+}
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType())
+ return CGF.EmitComplexExpr(Op, true, false, true, false).second;
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
+{
+ Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
+ const llvm::Type* ResultType = ConvertType(E->getType());
+ return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.E = E;
+ return Result;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but
+ // it's a tad complicated to do that... I'm leaving it out for now.
+ // (Note that we do actually need the imaginary part of the RHS for
+ // multiplication and division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ }
+
+ // Emit the RHS first. __block variables need to have the RHS evaluated
+ // first; doing so should also improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ // Expand the binary operator.
+ Value *Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ // Store the result value into the LHS lvalue. Bit-fields are
+ // handled specially because the result is altered by the store,
+ // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
+ // the left operand after the assignment...'.
+ if (LHSLV.isBitfield()) {
+ if (!LHSLV.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+ &Result);
+ return Result;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHSLV, E->getType());
+}
+
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (Ops.LHS->getType()->isFPOrFPVector())
+ return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ else if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.E->getOpcode()) {
+ case BinaryOperator::Add:
+ case BinaryOperator::AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BinaryOperator::Sub:
+ case BinaryOperator::SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BinaryOperator::Mul:
+ case BinaryOperator::MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ assert(false && "Unsupported operation for overflow detection");
+ IID = 0;
+ }
+ OpID <<= 1;
+ OpID |= 1;
+
+ const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *overflowBB =
+ CGF.createBasicBlock("overflow", CGF.CurFn);
+ llvm::BasicBlock *continueBB =
+ CGF.createBasicBlock("overflow.continue", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // Handle overflow
+
+ Builder.SetInsertPoint(overflowBB);
+
+ // The handler has the signature:
+ // long long (*__overflow_handler)(long long a, long long b, char op,
+ // char width)
+ std::vector<const llvm::Type*> handlerArgTypes;
+ handlerArgTypes.push_back(llvm::Type::Int64Ty);
+ handlerArgTypes.push_back(llvm::Type::Int64Ty);
+ handlerArgTypes.push_back(llvm::Type::Int8Ty);
+ handlerArgTypes.push_back(llvm::Type::Int8Ty);
+ llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
+ handlerArgTypes, false);
+ llvm::Value *handlerFunction =
+ CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
+ llvm::PointerType::getUnqual(handlerTy));
+ handlerFunction = Builder.CreateLoad(handlerFunction);
+
+ llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
+ Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
+ Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
+ llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
+ llvm::ConstantInt::get(llvm::Type::Int8Ty,
+ cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+
+ Builder.CreateBr(continueBB);
+
+ // Set up the continuation
+ Builder.SetInsertPoint(continueBB);
+ // Get the correct result
+ llvm::PHINode *phi = Builder.CreatePHI(opTy);
+ phi->reserveOperandSpace(2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
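+
+// For illustration, the approximate shape of the IR emitted above for a
+// signed "a + b" with overflow checking enabled is:
+//   %t   = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+//   %sum = extractvalue {i32, i1} %t, 0
+//   %ovf = extractvalue {i32, i1} %t, 1
+//   br i1 %ovf, label %overflow, label %overflow.continue
+// overflow:          ; widen to i64, call through *__overflow_handler, trunc
+// overflow.continue: ; phi of the plain result and the handler's result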
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
+ if (!Ops.Ty->isPointerType()) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+ }
+
+ if (Ops.Ty->getAsPointerType()->isVariableArrayType()) {
+ // The amount of the addition needs to account for the VLA size
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+ }
+ Value *Ptr, *Idx;
+ Expr *IdxExp;
+ const PointerType *PT;
+ if ((PT = Ops.E->getLHS()->getType()->getAsPointerType())) {
+ Ptr = Ops.LHS;
+ Idx = Ops.RHS;
+ IdxExp = Ops.E->getRHS();
+ } else { // int + pointer
+ PT = Ops.E->getRHS()->getType()->getAsPointerType();
+ assert(PT && "Invalid add expr");
+ Ptr = Ops.RHS;
+ Idx = Ops.LHS;
+ IdxExp = Ops.E->getLHS();
+ }
+
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero- or sign-extend the index value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ if (IdxExp->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+
+ const QualType ElementType = PT->getPointeeType();
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSize(OIT) / 8);
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic
+ // extensions. The GNU void* casts amount to no-ops since our void*
+ // type is i8*, but this is future proof.
+ if (ElementType->isVoidType() || ElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+ Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ptr->getType());
+ }
+
+ return Builder.CreateGEP(Ptr, Idx, "add.ptr");
+}
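+
+// For illustration: "p + i" with "int *p" and "int i" on a 64-bit target is
+// emitted by EmitAdd roughly as
+//   %idx.ext = sext i32 %i to i64
+//   %add.ptr = getelementptr i32* %p, i64 %idx.ext
+// For Objective-C interface element types and the GNU void*/function-pointer
+// extensions, the pointer is first bitcast to i8* so the GEP works in raw
+// bytes.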
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
+ if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
+ if (CGF.getContext().getLangOptions().OverflowChecking
+ && Ops.Ty->isSignedIntegerType())
+ return EmitOverflowCheckedBinOp(Ops);
+ return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+ }
+
+ if (Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
+ // For ptr - int, the amount of the subtraction needs to account for the
+ // VLA size; for ptr - ptr, the division needs to account for it.
+ CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+ }
+
+ const QualType LHSType = Ops.E->getLHS()->getType();
+ const QualType LHSElementType = LHSType->getAsPointerType()->getPointeeType();
+ if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
+ // pointer - int
+ Value *Idx = Ops.RHS;
+ unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+ if (Width < CGF.LLVMPointerWidth) {
+ // Zero- or sign-extend the index value based on whether the index is
+ // signed or not.
+ const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+ Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+ else
+ Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+ }
+ Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+
+ // Handle interface types, which are not represented with a concrete
+ // type.
+ if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(LHSElementType)) {
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ CGF.getContext().getTypeSize(OIT) / 8);
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic
+ // extensions. The GNU void* casts amount to no-ops since our
+ // void* type is i8*, but this is future proof.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+ Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+ return Builder.CreateBitCast(Res, Ops.LHS->getType());
+ }
+
+ return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
+ } else {
+ // pointer - pointer
+ Value *LHS = Ops.LHS;
+ Value *RHS = Ops.RHS;
+
+ uint64_t ElementSize;
+
+ // Handle GCC extension for pointer arithmetic on void* and function pointer
+ // types.
+ if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+ ElementSize = 1;
+ } else {
+ ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
+ }
+
+ const llvm::Type *ResultType = ConvertType(Ops.Ty);
+ LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Optimize out the shift for element size of 1.
+ if (ElementSize == 1)
+ return BytesBetween;
+
+ // HACK: LLVM doesn't have a divide instruction that 'knows' there is no
+ // remainder. As such, we handle common power-of-two cases here to generate
+ // better code. See PR2247.
+ if (llvm::isPowerOf2_64(ElementSize)) {
+ Value *ShAmt =
+ llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
+ return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
+ }
+
+ // Otherwise, do a full sdiv.
+ Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
+ return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ }
+}
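+
+// For illustration: "p - q" with "int *p, *q" is emitted by EmitSub roughly as
+//   %lhs = ptrtoint i32* %p to i64
+//   %rhs = ptrtoint i32* %q to i64
+//   %sub.ptr.sub = sub i64 %lhs, %rhs
+//   %sub.ptr.shr = ashr i64 %sub.ptr.sub, 2   ; element size 4 is 1 << 2
+// falling back to an sdiv by the element size when it is not a power of two.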
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (Ops.Ty->isUnsignedIntegerType())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (!LHSTy->isAnyComplexType() && !LHSTy->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFloatingPoint()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->isSignedIntegerType()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+ } else if (LHSTy->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+ if (LHS->getType()->isFPOrFPVector()) {
+ Result = Builder.CreateVFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->isUnsignedIntegerType()) {
+ Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Signed integers and pointers.
+ Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ }
+ return Result;
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAsComplexType()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BinaryOperator::EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BinaryOperator::NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ // __block variables need to have the RHS evaluated first; doing so should
+ // also improve codegen just a little.
+ Value *RHS = Visit(E->getRHS());
+ LValue LHS = EmitLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitfield()) {
+ if (!LHS.isVolatileQualified()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+ &RHS);
+ return RHS;
+ } else
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
+ } else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+ if (Ignore)
+ return 0;
+ return EmitLoadOfLValue(LHS, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+ // If we have 0 && RHS, see if we can elide the RHS; if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == 1) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int.
+ return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(CGF.LLVMIntTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+ // Any edges into the ContBlock at this point come from an (indeterminate
+ // number of) short-circuit edges out of this first condition, and all of
+ // them carry the value false. Start setting up the PHI node in the Cont
+ // Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);
+
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
+}
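+
+// For illustration, "a && b" (when neither side constant folds) is emitted
+// roughly as
+//   entry:    br i1 %a.bool, label %land.rhs, label %land.end
+//   land.rhs: %b.bool = ...   ; evaluate RHS as a bool
+//             br label %land.end
+//   land.end: %t = phi i1 [ false, %entry ], [ %b.bool, %land.rhs ]
+//             %land.ext = zext i1 %t to i32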
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+ // If we have 1 || RHS, see if we can elide the RHS; if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+ if (Cond == -1) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int.
+ return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+ // Any edges into the ContBlock at this point come from an (indeterminate
+ // number of) short-circuit edges out of this first condition, and all of
+ // them carry the value true. Start setting up the PHI node in the Cont
+ // Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ PN->reserveOperandSpace(2); // Normal case, two inputs.
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitStmt(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
+ return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
+
+ // TODO: Allow anything we can constant fold to an integer or fp constant.
+ if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
+ isa<FloatingLiteral>(E))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
+ return true;
+
+ return false;
+}
+
+
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
+ Expr *Live = E->getLHS(), *Dead = E->getRHS();
+ if (Cond == -1)
+ std::swap(Live, Dead);
+
+ // If the dead side doesn't have labels we need, and if the Live side isn't
+ // the gnu missing ?: extension (which we could handle, but don't bother
+ // to), just emit the Live part.
+ if ((!Dead || !CGF.ContainsLabel(Dead)) && // No labels in dead part
+ Live) // Live part isn't missing.
+ return Visit(Live);
+ }
+
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
+ isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
+ llvm::Value *LHS = Visit(E->getLHS());
+ llvm::Value *RHS = Visit(E->getRHS());
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+ Value *CondVal = 0;
+
+ // If we don't have the GNU missing condition extension, emit a branch on
+ // bool the normal way.
+ if (E->getLHS()) {
+ // Just use EmitBranchOnBoolExpr to get small and simple code for the
+ // branch on bool.
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+ } else {
+ // Otherwise, for the ?: extension, evaluate the conditional and then
+ // convert it to bool the hard way. We do this explicitly because we need
+ // the unconverted value for the missing middle value of the ?:.
+ CondVal = CGF.EmitScalarExpr(E->getCond());
+
+ // In some cases, EmitScalarConversion will delete the "CondVal" expression
+ // if there are no extra uses (an optimization). Inhibit this by making an
+ // extra dead use, because we're going to add a use of CondVal later. We
+ // don't use the builder for this, because we don't want it to get optimized
+ // away. This leaves dead code, but the ?: extension isn't common.
+ new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
+ Builder.GetInsertBlock());
+
+ Value *CondBoolVal =
+ CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
+ CGF.getContext().BoolTy);
+ Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+ }
+
+ CGF.EmitBlock(LHSBlock);
+
+ // Handle the GNU extension for missing LHS.
+ Value *LHS;
+ if (E->getLHS())
+ LHS = Visit(E->getLHS());
+ else // Perform promotions, to handle cases like "short ?: int"
+ LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+
+ Value *RHS = Visit(E->getRHS());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+
+ if (!LHS || !RHS) {
+ assert(E->getType()->isVoidType() && "Non-void value should have a value");
+ return 0;
+ }
+
+ // Create a PHI node for the result.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+ PN->reserveOperandSpace(2);
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
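+
+// For illustration: "c ? 4 : 5" takes the cheap path above and becomes
+//   %tobool = icmp ne i32 %c, 0
+//   %cond   = select i1 %tobool, i32 4, i32 5
+// while something like "c ? f() : g()" uses the branch-and-phi form so that
+// only the chosen arm is evaluated.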
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+ // FIXME Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
+ return CGF.BuildBlockLiteralTmp(BE);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of
+/// scalar type, returning the computed LLVM value.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ return ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination
+/// type is an LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
+ assert(V1->getType() == V2->getType() &&
+ "Vector operands must be of the same type");
+ unsigned NumElements =
+ cast<llvm::VectorType>(V1->getType())->getNumElements();
+
+ va_list va;
+ va_start(va, V2);
+
+ llvm::SmallVector<llvm::Constant*, 16> Args;
+ for (unsigned i = 0; i < NumElements; i++) {
+ int n = va_arg(va, int);
+ assert(n >= 0 && n < (int)NumElements * 2 &&
+ "Vector shuffle index out of bounds!");
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
+ }
+
+ const char *Name = va_arg(va, const char *);
+ va_end(va);
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+
+ return Builder.CreateShuffleVector(V1, V2, Mask, Name);
+}
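+
+// For illustration, a hypothetical caller interleaving the low halves of two
+// <4 x float> values could write
+//   Value *Lo = EmitShuffleVector(V1, V2, 0, 4, 1, 5, "lo.interleave");
+// which lowers to a single shufflevector with mask <0, 4, 1, 5>.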
+
+llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
+ unsigned NumVals, bool isSplat) {
+ llvm::Value *Vec
+ = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
+
+ for (unsigned i = 0, e = NumVals; i != e; ++i) {
+ llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
+ }
+
+ return Vec;
+}
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..51f9a76
--- /dev/null
+++ b/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,644 @@
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ const Expr *ReceiverExpr = E->getReceiver();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ // Find the receiver
+ llvm::Value *Receiver;
+ if (!ReceiverExpr) {
+ const ObjCInterfaceDecl *OID = E->getClassInfo().first;
+
+ // Very special case, super send in class method. The receiver is
+ // self (the class object) and the send uses super semantics.
+ if (!OID) {
+ assert(E->getClassName()->isStr("super") &&
+ "Unexpected missing class interface in message send.");
+ isSuperMessage = true;
+ Receiver = LoadObjCSelf();
+ } else {
+ Receiver = Runtime.GetClass(Builder, OID);
+ }
+
+ isClassMessage = true;
+ } else if (isa<ObjCSuperExpr>(E->getReceiver())) {
+ isSuperMessage = true;
+ Receiver = LoadObjCSelf();
+ } else {
+ Receiver = EmitScalarExpr(E->getReceiver());
+ }
+
+ CallArgList Args;
+ EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return Runtime.GenerateMessageSendSuper(*this, E->getType(),
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+ }
+ return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+ Receiver, isClassMessage, Args,
+ E->getMethodDecl());
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ FunctionArgList Args;
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ OMD->getSelfDecl()->getType()));
+ Args.push_back(std::make_pair(OMD->getCmdDecl(),
+ OMD->getCmdDecl()->getType()));
+
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+
+ StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocEnd());
+}
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ // Check if we should generate debug info for this method.
+ if (CGM.getDebugInfo() && !OMD->hasAttr<NodebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+ StartObjCMethod(OMD, OMD->getClassInterface());
+ EmitStmt(OMD->getBody(getContext()));
+ FinishFunction(OMD->getBodyRBrace(getContext()));
+}
+
+// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
+// AST for the whole body we can just fall back to having a GenerateFunction
+// which takes the body Stmt.
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ // FIXME: This is rather murky; we create these here since they will not
+ // have been created by Sema for us.
+ OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ // Determine if we should use an objc_getProperty call for
+ // this. Non-atomic properties are directly evaluated; atomic
+ // 'copy' and 'retain' properties are also directly evaluated
+ // in GC-only mode.
+ if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *GetPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+
+ if (!GetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
+ GetPropertyFn, Args);
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ } else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+ if (hasAggregateLLVMType(Ivar->getType())) {
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ }
+ else {
+ CodeGenTypes &Types = CGM.getTypes();
+ RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ Types.ConvertType(PD->getType())));
+ EmitReturnOfRValue(RV, PD->getType());
+ }
+ }
+
+ FinishFunction();
+}
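+
+// For illustration: for "@property (retain) id foo;" (atomic, non-GC) the
+// getter synthesized above behaves roughly like
+//   - (id)foo { return objc_getProperty(self, _cmd, <ivar offset>, YES); }
+// while a nonatomic or scalar property is read directly from the ivar lvalue.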
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ // FIXME: This is rather murky; we create these here since they will not
+ // have been created by Sema for us.
+ OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+ StartObjCMethod(OMD, IMP->getClassInterface());
+
+ bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
+ bool IsAtomic =
+ !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+ // Determine if we should use an objc_setProperty call for
+ // this. Properties with 'copy' semantics always use it, as do
+ // properties with 'retain' semantics as long as we are not in
+ // GC-only mode.
+ if (IsCopy ||
+ (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+ PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+ llvm::Value *SetPropertyFn =
+ CGM.getObjCRuntime().GetPropertySetFunction();
+
+ if (!SetPropertyFn) {
+ CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+ FinishFunction();
+ return;
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ CodeGenTypes &Types = CGM.getTypes();
+ ValueDecl *Cmd = OMD->getCmdDecl();
+ llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+ QualType IdTy = getContext().getObjCIdType();
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+ llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+ llvm::Value *ArgAsId =
+ Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
+ Types.ConvertType(IdTy));
+ llvm::Value *True =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+ llvm::Value *False =
+ llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+ Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+ Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ getContext().BoolTy));
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ getContext().BoolTy));
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
+ SetPropertyFn, Args);
+ } else {
+ SourceLocation Loc = PD->getLocation();
+ ValueDecl *Self = OMD->getSelfDecl();
+ ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+ DeclRefExpr Base(Self, Self->getType(), Loc);
+ ParmVarDecl *ArgDecl = *OMD->param_begin();
+ DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
+ ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base,
+ true, true);
+ BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+ Ivar->getType(), Loc);
+ EmitStmt(&Assign);
+ }
+
+ FinishFunction();
+}
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ // See if we need to lazily forward self inside a block literal.
+ BlockForwardSelf();
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const PointerType *PTy =
+ cast<PointerType>(getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+ const Selector &S) {
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ CallArgList());
+
+}
+
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getGetterName();
+ if (isa<ObjCSuperExpr>(E->getBase()))
+ return EmitObjCSuperPropertyGet(E, S);
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
+ false, CallArgList());
+ }
+ else {
+ const ObjCKVCRefExpr *KE = cast<ObjCKVCRefExpr>(Exp);
+ Selector S = KE->getGetterMethod()->getSelector();
+ llvm::Value *Receiver;
+ if (KE->getClassProp()) {
+ const ObjCInterfaceDecl *OID = KE->getClassProp();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ }
+ else if (isa<ObjCSuperExpr>(KE->getBase()))
+ return EmitObjCSuperPropertyGet(KE, S);
+ else
+ Receiver = EmitScalarExpr(KE->getBase());
+ return CGM.getObjCRuntime().
+ GenerateMessageSend(*this, Exp->getType(), S,
+ Receiver,
+ KE->getClassProp() != 0, CallArgList());
+ }
+}
+
+void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
+ const Selector &S,
+ RValue Src) {
+ CallArgList Args;
+ llvm::Value *Receiver = LoadObjCSelf();
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isClassMessage = OMD->isClassMethod();
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ Args.push_back(std::make_pair(Src, Exp->getType()));
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Exp->getType(),
+ S,
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args);
+ return;
+}
+
+void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
+ RValue Src) {
+ // FIXME: Split it into two separate routines.
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+ Selector S = E->getProperty()->getSetterName();
+ if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ CallArgList Args;
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
+ false, Args);
+ }
+ else if (const ObjCKVCRefExpr *E = dyn_cast<ObjCKVCRefExpr>(Exp)) {
+ Selector S = E->getSetterMethod()->getSelector();
+ CallArgList Args;
+ llvm::Value *Receiver;
+ if (E->getClassProp()) {
+ const ObjCInterfaceDecl *OID = E->getClassProp();
+ Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+ }
+ else if (isa<ObjCSuperExpr>(E->getBase())) {
+ EmitObjCSuperPropertySet(E, S, Src);
+ return;
+ }
+ else
+ Receiver = EmitScalarExpr(E->getBase());
+ Args.push_back(std::make_pair(Src, E->getType()));
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ Receiver,
+ E->getClassProp() != 0, Args);
+ }
+ else
+ assert (0 && "bad expression node in EmitObjCPropertySet");
+}
+
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+ llvm::Value *DeclAddress;
+ QualType ElementTy;
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ EmitStmt(SD);
+ assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
+ const Decl* D = SD->getSingleDecl();
+ ElementTy = cast<ValueDecl>(D)->getType();
+ DeclAddress = LocalDeclMap[D];
+ } else {
+ ElementTy = cast<Expr>(S.getElement())->getType();
+ DeclAddress = 0;
+ }
+
+ // Fast enumeration state.
+ QualType StateTy = getContext().getObjCFastEnumerationStateType();
+ llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
+ "state.ptr");
+ StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
+ EmitMemSetToZero(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Get selector
+ llvm::SmallVector<IdentifierInfo*, 3> II;
+ II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
+ II.push_back(&CGM.getContext().Idents.get("objects"));
+ II.push_back(&CGM.getContext().Idents.get("count"));
+ Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
+ &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateTempAlloca(ConvertType(ItemsTy), "items.ptr");
+
+ llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
+ getContext().getPointerType(StateTy)));
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+ getContext().getPointerType(ItemsTy)));
+
+ const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.push_back(std::make_pair(RValue::get(Count),
+ getContext().UnsignedLongTy));
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, false, Args);
+
+ llvm::Value *LimitPtr = CreateTempAlloca(UnsignedLongLTy, "limit.ptr");
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+
+ llvm::BasicBlock *NoElements = createBasicBlock("noelements");
+ llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+
+ llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+
+ EmitBlock(SetStartMutations);
+
+ llvm::Value *StartMutationsPtr =
+ CreateTempAlloca(UnsignedLongLTy);
+
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+ "mutations");
+
+ Builder.CreateStore(StateMutations, StartMutationsPtr);
+
+ llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
+ EmitBlock(LoopStart);
+
+ llvm::Value *CounterPtr = CreateTempAlloca(UnsignedLongLTy, "counter.ptr");
+ Builder.CreateStore(Zero, CounterPtr);
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+ EmitBlock(LoopBody);
+
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ "mutations");
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ StartMutations,
+ "tobool");
+
+
+ llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
+ llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+
+ Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
+
+ EmitBlock(WasMutated);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()),
+ "tmp");
+ CallArgList Args2;
+ Args2.push_back(std::make_pair(RValue::get(V),
+ getContext().getObjCIdType()));
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
+ EnumerationMutationFn, Args2);
+
+ EmitBlock(WasNotMutated);
+
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+
+ llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+
+ llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
+ "stateitems");
+
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
+
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
+
+ // Cast the item to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem,
+ ConvertType(ElementTy), "tmp");
+
+ if (!DeclAddress) {
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Store the current item into the element lvalue.
+ Builder.CreateStore(CurrentItem, LV.getAddress());
+ } else
+ Builder.CreateStore(CurrentItem, DeclAddress);
+
+ // Increment the counter.
+ Counter = Builder.CreateAdd(Counter,
+ llvm::ConstantInt::get(UnsignedLongLTy, 1));
+ Builder.CreateStore(Counter, CounterPtr);
+
+ llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
+ llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(AfterBody);
+
+ llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+
+ Counter = Builder.CreateLoad(CounterPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
+ Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+
+ // Fetch more elements.
+ EmitBlock(FetchMore);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, false, Args);
+ Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+ Limit = Builder.CreateLoad(LimitPtr);
+
+ IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+ Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+
+ // No more elements.
+ EmitBlock(NoElements);
+
+ if (!DeclAddress) {
+ // If the element was not a declaration, set it to be null.
+
+ LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+ // Set the value to null.
+ Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
+ LV.getAddress());
+ }
+
+ EmitBlock(LoopEnd);
+}
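+
+// For illustration, "for (id x in collection) body;" is lowered above to
+// roughly the following pseudo-code (items is a 16-element id buffer):
+//   limit = [collection countByEnumeratingWithState:&state objects:items
+//                                              count:16];
+//   while (limit != 0) {
+//     for (counter = 0; counter < limit; ) {
+//       if (*state.mutationsPtr != startMutations)
+//         <call the runtime's enumeration mutation handler>(collection);
+//       x = (ElementTy)state.itemsPtr[counter];
+//       ++counter;
+//       body;
+//     }
+//     limit = [collection countByEnumeratingWithState:&state objects:items
+//                                                count:16];
+//   }
+//   if (x is an existing lvalue, not a fresh declaration) x = nil;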
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
+{
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
+{
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S)
+{
+ CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..5e7eec9
--- /dev/null
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,1582 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <map>
+
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::dyn_cast;
+
+// The version of the runtime that this class targets. Must match the version
+// in the runtime.
+static const int RuntimeVersion = 8;
+static const int NonFragileRuntimeVersion = 9;
+static const int ProtocolVersion = 2;
+
+namespace {
+class CGObjCGNU : public CodeGen::CGObjCRuntime {
+private:
+ CodeGen::CodeGenModule &CGM;
+ llvm::Module &TheModule;
+ const llvm::PointerType *SelectorTy;
+ const llvm::PointerType *PtrToInt8Ty;
+ const llvm::FunctionType *IMPTy;
+ const llvm::PointerType *IdTy;
+ const llvm::IntegerType *IntTy;
+ const llvm::PointerType *PtrTy;
+ const llvm::IntegerType *LongTy;
+ const llvm::PointerType *PtrToIntTy;
+ llvm::GlobalAlias *ClassPtrAlias;
+ llvm::GlobalAlias *MetaClassPtrAlias;
+ std::vector<llvm::Constant*> Classes;
+ std::vector<llvm::Constant*> Categories;
+ std::vector<llvm::Constant*> ConstantStrings;
+ llvm::Function *LoadFunction;
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ typedef std::pair<std::string, std::string> TypedSelector;
+ std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
+ llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ llvm::Constant *NULLPtr;
+private:
+ llvm::Constant *GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
+ llvm::Constant *GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList);
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ llvm::Constant *GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols);
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols);
+ llvm::Constant *GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
+  llvm::Constant *MakeConstantString(const std::string &Str,
+                                     const std::string &Name = "");
+ llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name="");
+ llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name="");
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+public:
+ CGObjCGNU(CodeGen::CodeGenModule &cgm);
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+  virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+                                   const ObjCMethodDecl *Method);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Function *GetPropertyGetFunction();
+ virtual llvm::Function *GetPropertySetFunction();
+ virtual llvm::Function *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+} // end anonymous namespace
+
+
+
+static std::string SymbolNameForClass(const std::string &ClassName) {
+ return "___objc_class_name_" + ClassName;
+}
+
+static std::string SymbolNameForMethod(const std::string &ClassName,
+                                       const std::string &CategoryName,
+                                       const std::string &MethodName,
+                                       bool isClassMethod) {
+ return "._objc_method_" + ClassName +"("+CategoryName+")"+
+ (isClassMethod ? "+" : "-") + MethodName;
+}
+
+CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
+ : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
+ MetaClassPtrAlias(0) {
+ IntTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy));
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
+ // C string type. Used in lots of places.
+ PtrToInt8Ty =
+ llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ // Get the selector Type.
+ SelectorTy = cast<llvm::PointerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ // Object type
+ IdTy = cast<llvm::PointerType>(
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCIdType()));
+
+ // IMP type
+ std::vector<const llvm::Type*> IMPArgs;
+ IMPArgs.push_back(IdTy);
+ IMPArgs.push_back(SelectorTy);
+ IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
+}
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
+ Params,
+ true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
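+
+// The call emitted above is, in effect, the C-level call
+//
+//   id cls = objc_lookup_class("SomeClassName");
+//
+// (a sketch; the lookup function is declared here as returning id, matching
+// IdTy). Going through the runtime on every use is what keeps posing and
+// similar name-to-class remappings working.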
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
+ if (US == 0)
+ US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::InternalLinkage,
+ ".objc_untyped_selector_alias",
+ NULL, &TheModule);
+
+ return Builder.CreateLoad(US);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder,
+                                    const ObjCMethodDecl *Method) {
+
+ std::string SelName = Method->getSelector().getAsString();
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+  // Typed selectors
+  TypedSelector Selector(SelName, SelTypes);
+
+  // If it's already cached, return it.
+  if (llvm::GlobalAlias *Cached = TypedSelectors[Selector])
+    return Builder.CreateLoad(Cached);
+
+ // If it isn't, cache it.
+ llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
+ llvm::PointerType::getUnqual(SelectorTy),
+ llvm::GlobalValue::InternalLinkage, SelName,
+ NULL, &TheModule);
+ TypedSelectors[Selector] = Sel;
+
+ return Builder.CreateLoad(Sel);
+}
+
+llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
+ const std::string &Name) {
+ llvm::Constant * ConstStr = llvm::ConstantArray::get(Str);
+ ConstStr = new llvm::GlobalVariable(ConstStr->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ ConstStr, Name, &TheModule);
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+}
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
+ std::vector<llvm::Constant*> &V, const std::string &Name) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+}
+
+/// Generate an NSConstantString object.
+//TODO: In case there are any crazy people still using the GNU runtime without
+//an OpenStep implementation, this should let them select their own class for
+//constant strings.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
+ std::string Str(SL->getString()->getStrData(),
+ SL->getString()->getByteLength());
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(NULLPtr);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ConstantStrings.push_back(
+ llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty));
+ return ObjCStr;
+}
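+
+// The static string emitted above corresponds, roughly, to an instance laid
+// out as
+//
+//   struct { void *isa; const char *bytes; int length; }
+//
+// with isa left NULL here; ModuleInitFunction below adds the object to the
+// statics list under the "NSConstantString" class name so the runtime can
+// fix the isa pointer up at load time.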
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs) {
+ llvm::Value *cmd = GetSelector(CGF.Builder, Sel);
+
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ CGF.getContext().getObjCIdType()));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+ const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, Params, true), "objc_get_class");
+ }
+ ReceiverClass = CGF.Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+    // Set up global aliases for the metaclass or class pointer if they do not
+    // already exist. These are forward references which will be set to point
+    // to the class and metaclass structures created for the runtime load
+    // function. To send a message to super, we look up the value of the
+    // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(llvm::StructType::get(IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(Receiver->getType(),
+ IdTy, NULL);
+ llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);
+
+ CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+ CGF.Builder.CreateStore(ReceiverClass,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ // Get the IMP
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
+ Params.push_back(SelectorTy);
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_super");
+
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+ llvm::Value *imp = CGF.Builder.CreateCall(lookupFunction, lookupArgs,
+ lookupArgs+2);
+
+ return CGF.EmitCall(FnInfo, imp, ActualArgs);
+}
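+
+// As a sketch, the super send above performs the GNU runtime's two-step
+// dispatch (argument marshalling elided, structure field names illustrative):
+//
+//   struct { id receiver; Class super_class; } sup = { self, superclass };
+//   IMP imp = objc_msg_lookup_super(&sup, selector);
+//   result = imp(self, selector, other args...);
+//
+// where 'superclass' is the super_class pointer loaded from the class (or,
+// for class messages, the metaclass) structure above.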
+
+/// Generate code for a message send expression.
+CodeGen::RValue
+CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(CGF.Builder, Method);
+ else
+ cmd = GetSelector(CGF.Builder, Sel);
+ CallArgList ActualArgs;
+
+ ActualArgs.push_back(
+ std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ CGF.getContext().getObjCIdType()));
+ ActualArgs.push_back(std::make_pair(RValue::get(cmd),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+ const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+
+ llvm::Value *imp;
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Receiver->getType());
+ Params.push_back(SelectorTy);
+ // For sender-aware dispatch, we pass the sender as the third argument to a
+ // lookup function. When sending messages from C code, the sender is nil.
+ // objc_msg_lookup_sender(id receiver, SEL selector, id sender);
+ if (CGM.getContext().getLangOptions().ObjCSenderDispatch) {
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+ Params.push_back(self->getType());
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup_sender");
+
+ imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self);
+ } else {
+ llvm::Constant *lookupFunction =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::PointerType::getUnqual(impType), Params, true),
+ "objc_msg_lookup");
+
+ imp = CGF.Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ }
+
+ return CGF.EmitCall(FnInfo, imp, ActualArgs);
+}
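+
+// An ordinary message send follows the same two-step pattern: ask the runtime
+// for the IMP, then call it directly. Roughly:
+//
+//   IMP imp = objc_msg_lookup(receiver, selector);
+//   result = imp(receiver, selector, other args...);
+//
+// or, when LangOptions.ObjCSenderDispatch is set, via
+// objc_msg_lookup_sender(receiver, selector, sender) as described above.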
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ bool isClassMethodList) {
+ // Get the method structure type.
+  llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+    PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+    PtrToInt8Ty, // Method types
+    llvm::PointerType::getUnqual(IMPTy), // Method pointer
+    NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ if (llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i].getAsString(),
+ isClassMethodList))) {
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(MethodSels[i].getAsString());
+ Elements.push_back(llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2));
+ Elements.push_back(
+ llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ llvm::PointerType::getUnqual(IMPTy));
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
+ llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get();
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::get(NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+ // Refine next pointer type to concrete type
+ llvm::cast<llvm::OpaqueType>(
+ OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
+ ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
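+
+// The global built above follows the runtime's method-list layout; as a
+// sketch (field names illustrative):
+//
+//   struct objc_method_list {
+//     struct objc_method_list *next;
+//     int count;
+//     struct { const char *sel_name; const char *types; IMP imp; } methods[];
+//   };
+//
+// The runtime replaces the selector-name strings with real selectors when the
+// module is loaded, as noted in the struct definition above.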
+
+/// Generates an IvarList. Used in the construction of an objc_class.
+llvm::Constant *CGObjCGNU::GenerateIvarList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
+ const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
+ // Get the method structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarNames[i],
+ Zeros, 2));
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarTypes[i],
+ Zeros, 2));
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ llvm::StructType *ClassTy = llvm::StructType::get(
+ PtrToInt8Ty, // class_pointer
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+ // These are all filled in by the runtime, so we pretend
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ llvm::Constant *NullP =
+ llvm::ConstantPointerNull::get(PtrTy);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NullP);
+ Elements.push_back(NullP);
+ Elements.push_back(NullP);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NullP);
+ // Create an instance of the structure
+ return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name));
+}
+
+llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back( llvm::ConstantExpr::getGetElementPtr(MethodNames[i],
+ Zeros, 2));
+ Elements.push_back(
+ llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(
+ const llvm::SmallVectorImpl<std::string> &Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+    PtrTy, // Should be a recursive pointer, but it's always NULL here.
+    LongTy, // FIXME: Should be size_t.
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = ExistingProtocols[*iter];
+ if (!protocol)
+ protocol = GenerateEmptyProtocol(*iter);
+ llvm::Constant *Ptr =
+ llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ const llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ llvm::SmallVector<std::string, 0> EmptyStringVector;
+ llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+ llvm::SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(Context),
+ E = PD->instmeth_end(Context); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+ InstanceMethodNames.push_back(
+ CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ // Collect information about class methods:
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(Context),
+ endIter = PD->classmeth_end(Context) ; iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodNames.push_back(
+ CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
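+
+// Both GenerateProtocol and GenerateEmptyProtocol above emit a protocol
+// object of roughly this shape (a sketch; field names illustrative):
+//
+//   struct { id isa;                 // really the protocol version number
+//            const char *name;
+//            struct objc_protocol_list *protocols;
+//            struct objc_method_description_list *instance_methods;
+//            struct objc_method_description_list *class_methods; };
+//
+// The isa slot holds ProtocolVersion cast to a pointer so the runtime can
+// recognise the layout, as the comments above note.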
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(CGM.getContext()),
+ endIter = OCD->instmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(CGM.getContext()),
+ endIter = OCD->classmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCInterfaceDecl *ClassDecl = OCD->getClassInterface();
+ const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy,
+ PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl)
+ SuperClassName = SuperClassDecl->getNameAsString();
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+
+ // Get the size of instances.
+ int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+
+ // Collect information about instance variables.
+ llvm::SmallVector<llvm::Constant*, 16> IvarNames;
+ llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
+ llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
+ for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+ endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+ // Store the name
+ IvarNames.push_back(CGM.GetAddrOfConstantCString((*iter)
+ ->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType((*iter)->getType(), TypeStr);
+ IvarTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ // Get the offset
+ uint64_t Offset;
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter) -
+ superInstanceSize;
+ ObjCIvarOffsetVariable(ClassDecl, *iter);
+ } else {
+ Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
+ }
+ IvarOffsets.push_back(
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset));
+ }
+
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 16> InstanceMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(CGM.getContext()),
+ endIter = OID->instmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(CGM.getContext()),
+ endIter = OID->propimpl_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ InstanceMethodSels.push_back(getter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ InstanceMethodSels.push_back(setter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ }
+
+ // Collect information about class methods
+ llvm::SmallVector<Selector, 16> ClassMethodSels;
+ llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(CGM.getContext()),
+ endIter = OID->classmeth_end(CGM.getContext());
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ llvm::SmallVector<std::string, 16> Protocols;
+ const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ llvm::SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ //Generate metaclass for class methods
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x2L, /*name*/"", 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x1L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols));
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->setAliasee(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+  // Don't emit an ObjC load function if no Objective-C constructs have been
+  // generated in this module; there is nothing to register with the runtime.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && TypedSelectors.empty() &&
+ UntypedSelectors.empty())
+ return NULL;
+
+ const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ const llvm::Type *SelStructPtrTy = SelectorTy;
+ bool isSelOpaque = false;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ isSelOpaque = true;
+ }
+
+ // Name the ObjC types to make the IR a bit easier to read
+ TheModule.addTypeName(".objc_selector", SelStructPtrTy);
+ TheModule.addTypeName(".objc_id", IdTy);
+ TheModule.addTypeName(".objc_imp", IMPTy);
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+ Elements.push_back(MakeConstantString("NSConstantString",
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
+ llvm::Type::Int16Ty,
+ llvm::Type::Int16Ty,
+ ClassListTy, NULL);
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
+ iter != iterEnd ; ++iter) {
+ Elements.push_back(MakeConstantString(iter->first.first, ".objc_sel_name"));
+ Elements.push_back(MakeConstantString(iter->first.second,
+ ".objc_sel_types"));
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ Elements.push_back(
+ MakeConstantString(iter->getKeyData(), ".objc_sel_name"));
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ }
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
+ llvm::Constant *SelectorList = MakeGlobal(
+ llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ int index = 0;
+ for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+ iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
+ iter != iterEnd; ++iter) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy,
+ true, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ ".objc_sel_ptr", &TheModule);
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->setAliasee(SelPtr);
+ }
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+ iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+ iter != iterEnd; iter++) {
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy, true,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ ".objc_sel_ptr", &TheModule);
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ if (isSelOpaque) {
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+ llvm::PointerType::getUnqual(SelectorTy));
+ }
+ (*iter).second->setAliasee(SelPtr);
+ }
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
+ Elements.clear();
+ // Runtime version used for compatibility checking.
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ NonFragileRuntimeVersion));
+ } else {
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ }
+ // sizeof(ModuleTy)
+  llvm::TargetData td(&TheModule);
+  Elements.push_back(llvm::ConstantInt::get(LongTy,
+                                            td.getTypeSizeInBits(ModuleTy) / 8));
+ //FIXME: Should be the path to the file where this module was declared
+ Elements.push_back(NULLPtr);
+ Elements.push_back(SymTab);
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ std::vector<const llvm::Type*> VoidArgs;
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::VoidTy, VoidArgs, false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", LoadFunction);
+ CGBuilderTy Builder;
+ Builder.SetInsertPoint(EntryBB);
+
+ std::vector<const llvm::Type*> Params(1,
+ llvm::PointerType::getUnqual(ModuleTy));
+ llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::VoidTy, Params, true), "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
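+
+// The .objc_load_function generated above is, in effect,
+//
+//   static void objc_load(void) { __objc_exec_class(&module); }
+//
+// where 'module' is the { version, sizeof(module), name, symtab } structure
+// built in this function; the symbol table carries the selectors, classes,
+// categories and static instances collected for this translation unit.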
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ std::string CategoryName = OCD ? OCD->getNameAsString() : "";
+ std::string ClassName = OMD->getClassInterface()->getNameAsString();
+ std::string MethodName = OMD->getSelector().getAsString();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(BoolTy);
+  // id objc_getProperty(id, SEL, ptrdiff_t, bool);
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
+}
+
+llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
+}
+
+llvm::Function *CGObjCGNU::EnumerationMutationFunction() {
+ std::vector<const llvm::Type*> Params(1, IdTy);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::VoidTy, Params, true),
+ "objc_enumerationMutation"));
+}
+
+void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ // Pointer to the personality function
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ std::vector<const llvm::Type*>(), true),
+ "__gnu_objc_personality_v0");
+ Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(PtrTy);
+ llvm::Value *RethrowFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false), "_Unwind_Resume_or_Rethrow");
+
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+  // @synchronized() has no dedicated support in the GNU runtime's unwinding
+  // machinery; it is lowered here by bracketing the body with objc_sync_enter()
+  // and objc_sync_exit() calls, with the exit emitted as the implicit @finally.
+ if (!isTry) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
+ }
+
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ // Emit the statements in the @try {} block
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ // Jump to @finally if there is no exception
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the handlers
+ CGF.EmitBlock(TryHandler);
+
+ // Get the correct versions of the exception handling intrinsics
+  llvm::TargetData td(&TheModule);
+ int PointerWidth = td.getTypeSizeInBits(PtrTy);
+ assert((PointerWidth == 32 || PointerWidth == 64) &&
+ "Can't yet handle exceptions if pointers are not 32 or 64 bits");
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector = PointerWidth == 32 ?
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i32) :
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64);
+ llvm::Value *llvm_eh_typeid_for = PointerWidth == 32 ?
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i32) :
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64);
+
+ // Exception object
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> ESelArgs;
+ llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+
+ bool HasCatchAll = false;
+  // Only @try blocks may have @catch blocks; both @try and @synchronized go
+  // through the @finally handling below.
+ if (isTry) {
+ if (const ObjCAtCatchStmt* CatchStmt =
+ cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ CGF.setInvokeDest(CatchInCatch);
+
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+ // @catch() and @catch(id) both catch any ObjC exception
+ if (!CatchDecl || CGF.getContext().isObjCIdType(CatchDecl->getType())
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ ESelArgs.push_back(NULLPtr);
+ HasCatchAll = true;
+          // No further catches after this one will ever be reached.
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const PointerType *PT = CatchDecl->getType()->getAsPointerType();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT =
+ PT->getPointeeType()->getAsObjCInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType =
+ MakeConstantString(IT->getDecl()->getNameAsString());
+ ESelArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
+
+ // Find which handler was matched.
+ llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
+ ESelArgs.begin(), ESelArgs.end(), "selector");
+
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const ParmVarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ // Test whether this block matches the type for the selector and branch
+ // to Match if it does, or to the next BB if it doesn't.
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
+ Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+    if (CatchBody) {
+      // The exception object; only cast it to the parameter's type when there
+      // is a parameter to bind, since @catch() and @catch(id) handlers have
+      // none.
+      llvm::Value *ExcObject = Exc;
+
+      // Bind the catch parameter if it exists.
+      if (CatchParam) {
+        ExcObject = CGF.Builder.CreateBitCast(Exc,
+            CGF.ConvertType(CatchParam->getType()));
+        // CatchParam is a ParmVarDecl because of the grammar
+        // construction used to handle this, but for codegen purposes
+        // we treat this as a local decl.
+        CGF.EmitLocalBlockVarDecl(*CatchParam);
+        CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+      }
+
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+      assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+ // The @finally block is a secondary landing pad for any exceptions thrown in
+ // @catch() blocks
+ CGF.EmitBlock(CatchInCatch);
+ Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ ESelArgs.clear();
+ ESelArgs.push_back(Exc);
+ ESelArgs.push_back(Personality);
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
+ "selector");
+ CGF.Builder.CreateCall(llvm_eh_typeid_for,
+ CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(RethrowFn, CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::Value *ThrowFn =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+ &ExceptionAsObject+1);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
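+
+// Illustrative sketch (assumed lowering, not code from this patch): for a
+// source-level `@throw(obj);` outside any @try, the code above emits roughly
+//   call void @objc_exception_throw(i8* %obj)
+//   unreachable
+// while inside a @try, where an invoke destination exists, both the normal
+// and unwind edges of the emitted invoke branch to the enclosing landing pad:
+//   invoke void @objc_exception_throw(i8* %obj)
+//       to label %pad unwind label %pad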
+
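+// The GC read/write barrier hooks below are currently no-op stubs for the GNU
+// runtime: they emit no barrier calls and simply return (a null value for the
+// weak read).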
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ return 0;
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ return;
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetGV = CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+ llvm::ConstantInt *OffsetGuess =
+ llvm::ConstantInt::get(LongTy, Offset, "ivar");
+ IvarOffsetGV = new llvm::GlobalVariable(LongTy, false,
+ llvm::GlobalValue::CommonLinkage, OffsetGuess, Name, &TheModule);
+ }
+ return IvarOffsetGV;
+}
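+
+// A hedged example of the metadata this produces (class, ivar name, and offset
+// are hypothetical): for an interface MyClass with an ivar _count whose
+// computed base offset is 8 bytes, a 64-bit module would contain roughly
+//   @"__objc_ivar_offset_MyClass._count" = common global i64 8
+// Common linkage lets an authoritative definition, if one is linked in,
+// override this compile-time guess.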
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
+ IVE = OID->ivar_end(); IVI != IVE; ++IVI)
+ if (OIVD == *IVI)
+ return OID;
+
+ // Also look in synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGF.getContext().getLangOptions().ObjCNonFragileABI)
+ {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),
+ false, "ivar");
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(LongTy, Offset, "ivar");
+}
+
+CodeGen::CGObjCRuntime *CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM){
+ return new CGObjCGNU(CGM);
+}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..8f1404d
--- /dev/null
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,5780 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Target/TargetData.h"
+#include <sstream>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Common CGObjCRuntime functions. These don't belong here, but they don't
+// belong in CGObjCRuntime either, so we will live with it for now.
+
+/// FindIvarInterface - Find the interface containing the ivar.
+///
+/// FIXME: We shouldn't need to do this, the containing context should
+/// be fixed.
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD,
+ unsigned &Index) {
+ // FIXME: The index here is closely tied to how
+ // ASTContext::getObjCLayout is implemented. This should be fixed to
+ // get the information from the layout directly.
+ Index = 0;
+ for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(),
+ IVE = OID->ivar_end(); IVI != IVE; ++IVI, ++Index)
+ if (OIVD == *IVI)
+ return OID;
+
+ // Also look in synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ Context.CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+ if (OIVD == Ivars[k])
+ return OID;
+ ++Index;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD, Index);
+
+ return 0;
+}
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ unsigned Index;
+ const ObjCInterfaceDecl *Container =
+ FindIvarInterface(CGM.getContext(), OID, Ivar, Index);
+ assert(Container && "Unable to find ivar container");
+
+  // If we have an implementation (and the ivar is in it) then
+  // look up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && ID->getClassInterface() == Container)
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
+}
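+
+// Worked example (illustrative layout only): if LookupFieldBitOffset reports
+// an ivar at bit offset 96 within its containing class, the byte offset
+// returned to callers above is 96 / 8 == 12.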
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ QualType IvarTy = Ivar->getType();
+ const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (Ivar->isBitField()) {
+    // We need to compute the bit offset for the bit-field; the offset
+    // above is only to the containing byte. Note, there is a subtle
+    // invariant here: LookupFieldBitOffset can only handle
+    // non-synthesized ivars, but we may be called for synthesized
+    // ivars. However, a synthesized ivar can never be a bit-field, so
+    // this is safe.
+ uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;
+
+ uint64_t BitFieldSize =
+ Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
+ return LValue::MakeBitfield(V, BitOffset, BitFieldSize,
+ IvarTy->isSignedIntegerType(),
+ IvarTy.getCVRQualifiers()|CVRQualifiers);
+ }
+
+ LValue LV = LValue::MakeAddr(V, IvarTy.getCVRQualifiers()|CVRQualifiers,
+ CGF.CGM.getContext().getObjCGCAttrKind(IvarTy));
+ LValue::SetObjCIvar(LV, true);
+ return LV;
+}
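+
+// Roughly, the address computation above corresponds to the C expression
+//   (IvarTy *)((char *)BaseValue + Offset)
+// lowered as a bitcast to i8*, a byte-wise GEP, and a bitcast back to the
+// ivar's LLVM type; bit-field ivars additionally record their bit offset and
+// width so later accesses can mask and shift appropriately.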
+
+///
+
+namespace {
+
+ typedef std::vector<llvm::Constant*> ConstantVector;
+
+  // FIXME: We should find a nicer way to make the labels for metadata;
+  // string concatenation is lame.
+
+class ObjCCommonTypesHelper {
+private:
+ llvm::Constant *getMessageSendFn() const {
+ // id objc_msgSend (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend");
+ }
+
+ llvm::Constant *getMessageSendStretFn() const {
+ // id objc_msgSend_stret (id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ llvm::Constant *getMessageSendFpretFn() const {
+ // FIXME: This should be long double on x86_64?
+ // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::DoubleTy,
+ Params,
+ true),
+ "objc_msgSend_fpret");
+
+ }
+
+ llvm::Constant *getMessageSendSuperFn() const {
+ // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperFn2() const {
+ // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ const char *SuperName = "objc_msgSendSuper2";
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ SuperName);
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ // SEL op, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SelectorPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ const llvm::Type *Int8PtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ const llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ const llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ const llvm::Type *SelectorPtrTy;
+ /// ProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ const llvm::Type *ExternalProtocolPtrTy;
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ const llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ const llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ const llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ const llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ const llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ const llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ const llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ const llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ llvm::SmallVector<QualType,16> Params;
+ QualType IdType = Ctx.getObjCIdType();
+ QualType SelType = Ctx.getObjCSelType();
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(IdType, Params), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ llvm::SmallVector<QualType,16> Params;
+ QualType IdType = Ctx.getObjCIdType();
+ QualType SelType = Ctx.getObjCSelType();
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
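+
+  // Hedged usage sketch: a synthesized atomic, copying property setter is
+  // expected to lower to a call along the lines of
+  //   objc_setProperty(self, _cmd, /*ivar offset*/ Offset, newValue,
+  //                    /*atomic*/ true, /*copy*/ true);
+  // mirroring the (id, SEL, ptrdiff_t, id, bool, bool) signature above.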
+
+ llvm::Constant *getEnumerationMutationFn() {
+ // void objc_enumerationMutation (id)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+    // id objc_assign_strongCast(id, id *)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ Args.push_back(ObjectPtrTy->getPointerTo());
+ llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+  /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+  /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ const llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ const llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ const llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ const llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ const llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ const llvm::StructType *ProtocolExtensionTy;
+  /// ProtocolExtensionPtrTy - LLVM type for struct
+  /// objc_protocol_extension *.
+ const llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ const llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ const llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ const llvm::Type *MethodDescriptionListPtrTy;
+  /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ const llvm::Type *ProtocolListTy;
+  /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list *.
+ const llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ const llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ const llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ const llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ const llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ const llvm::Type *ClassExtensionPtrTy;
+ // IvarTy - LLVM type for struct objc_ivar.
+ const llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ const llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ const llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ const llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ const llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ const llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, false),
+ "objc_exception_extract");
+
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ClassPtrTy);
+ Params.push_back(ObjectPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ Params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(llvm::PointerType::getUnqual(llvm::Type::Int32Ty));
+ return
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ Params, false),
+ "_setjmp");
+
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - provides all of the types needed by
+/// Objective-C's modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ const llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ const llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy = LLVM for struct _protocol_t
+ const llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
+ const llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ const llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ const llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ const llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ const llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ const llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ const llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
+ const llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ const llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ const llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ const llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ const llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ const llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ const llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ const llvm::Type *SuperMessageRefPtrTy;
+
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdFixupFn() {
+ // id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_fixup");
+ }
+
+ llvm::Constant *getMessageSendIdStretFixupFn() {
+ // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(MessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendId_stret_fixup");
+ }
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(SuperPtrTy);
+ Params.push_back(SuperMessageRefPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+
+
+ /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
+ /// exception personality function.
+ llvm::Value *getEHPersonalityPtr() {
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ std::vector<const llvm::Type*>(),
+ true),
+ "__objc_personality_v0");
+ return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
+ }
+
+ llvm::Constant *getUnwindResumeOrRethrowFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "_Unwind_Resume_or_Rethrow");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ Params, false),
+ "objc_end_catch");
+
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Int8PtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ Params, false),
+ "objc_begin_catch");
+ }
+
+ const llvm::StructType *EHTypeTy;
+ const llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+  // FIXME! We may not need this after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ llvm::SmallVector<GC_IVAR, 16> SkipIvars;
+ llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ std::set<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ std::set<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+  /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+  /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ std::vector<llvm::GlobalValue*> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ std::vector<llvm::GlobalValue*> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
+
+ /// UsedGlobals - List of globals to pack into the llvm.used metadata
+ /// to prevent them from being clobbered.
+ std::vector<llvm::GlobalVariable*> UsedGlobals;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ std::string &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+ llvm::Constant *GetMethodVarName(const std::string &Name);
+
+  /// GetMethodVarType - Return a unique constant for the given
+  /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+  /// GetClassName - Return a unique constant for the given class
+  /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(const std::string &Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the UsedGlobals list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(const std::string &Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
+
+ /// GetNamedIvarList - Return the list of ivars in the interface
+ /// itself (not including super classes and not including unnamed
+ /// bitfields).
+ ///
+ /// For the non-fragile ABI, this also includes synthesized property
+ /// ivars.
+ void GetNamedIvarList(const ObjCInterfaceDecl *OID,
+ llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const;
+
+ CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) : CGM(cgm)
+ { }
+
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+  /// EmitModuleSymbols - Emit the module symbols, i.e. the list of defined
+  /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+  /// EmitMetaClassRef - Emit a forward reference to the class structure
+  /// for the metaclass of the given interface. The return value has
+  /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+  /// - Name: The name of the global variable holding the list.
+  /// - Section: The section into which the list should be emitted.
+  /// - Methods: The method description constants to include.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+ public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
+ /// legacy messaging dispatch.
+ llvm::DenseSet<Selector> NonLegacyDispatchMethods;
+
+ /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+ /// NonLegacyDispatchMethods; false otherwise.
+ bool LegacyDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+  /// EmitMetaClassRef - Return a Value* for the address of the _class_t
+  /// metaclass metadata for the given interface.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
+ { return EmitSelector(Builder, Sel); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx0),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm)
+{
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+ return EmitSelector(Builder, Sel);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // point to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const ObjCStringLiteral *SL) {
+ return CGM.GetAddrOfConstantCFString(SL->getString());
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+      // A message sent to 'super' in a class method defined in a category
+      // implementation requires special treatment.
+      // If we are in a class method, we must retrieve the
+      // _metaclass_ for the current class, pointed at by
+      // the class's "isa" pointer. The following assumes that
+      // "isa" is the first ivar in a class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ }
+ else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ } else {
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitLegacyMessageSend(CGF, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, ObjCTypes);
+}
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ return EmitLegacyMessageSend(CGF, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, ObjCTypes);
+}
+
+CodeGen::RValue CGObjCCommonMac::EmitLegacyMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCCommonTypesHelper &ObjCTypes) {
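+ // The runtime entry points take the receiver (or the objc_super record)
+ // and the selector ahead of the user-supplied arguments, so prepend them
+ // to the call argument list.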
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+ CGF.getContext().getObjCSelType()));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+ // Under the 64-bit ABI the function type must be treated as variadic;
+ // under the 32-bit ABI it does not appear to matter.
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, (ObjCABI == 2));
+
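+ // Pick the runtime entry point: the _stret variant when the result is
+ // returned through a hidden sret pointer, the _fpret variant for
+ // floating-point results (long double only under the non-fragile ABI),
+ // and the plain send function otherwise. IsSuper selects the
+ // objc_msgSendSuper family.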
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (ResultType->isFloatingType()) {
+ if (ObjCABI == 2) {
+ if (const BuiltinType *BT = ResultType->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+ Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFn2(IsSuper);
+ }
+ }
+ else
+ // FIXME: This currently matches gcc's API for x86-32; it may need to
+ // change for other targets if we learn their APIs.
+ Fn = ObjCTypes.getSendFpretFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+ assert(Fn && "EmitLegacyMessageSend - unknown API");
+ Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo, Fn, ActualArgs);
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+ // APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+ struct _objc_protocol {
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol_list *protocol_list;
+ struct _objc__method_prototype_list *instance_methods;
+ struct _objc__method_prototype_list *class_methods
+ };
+
+ See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ const char *ProtocolName = PD->getNameAsCString();
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(CGM.getContext()),
+ e = PD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(CGM.getContext()),
+ e = PD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
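+ // Fill in the _objc_protocol structure documented above: the extension
+ // record in the isa slot, the protocol name, the list of inherited
+ // protocols, and the required instance and class method lists.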
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] =
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getNameAsString(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+ Values[3] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[4] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ std::string("\01L_OBJC_PROTOCOL_")+ProtocolName,
+ &CGM.getModule());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ Entry->setAlignment(4);
+ UsedGlobals.push_back(Entry);
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getNameAsString(),
+ &CGM.getModule());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ Entry->setAlignment(4);
+ UsedGlobals.push_back(Entry);
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ const ConstantVector &OptInstanceMethods,
+ const ConstantVector &OptClassMethods) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods);
+ Values[2] =
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods);
+ Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" +
+ PD->getNameAsString(),
+ 0, PD, ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getNameAsString(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ std::vector<llvm::Constant*> Values(3);
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
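+ // The count does not include the null terminator appended above.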
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(const std::string &Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ std::vector<llvm::Constant*> Properties, Prop(2);
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(CGM.getContext()),
+ E = OCD->prop_end(CGM.getContext()); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ Prop[0] = GetPropertyName(PD->getIdentifier());
+ Prop[1] = GetPropertyTypeString(PD, Container);
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
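+ // Emit the _objc_property_list: entsize, prop_count, then the inline
+ // array of (name, attributes) pairs built above.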
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(2);
+ Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodDescList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+ */
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+ // FIXME: This is poor design, the OCD should have a pointer to the category
+ // decl. Additionally, note that Category can be null for the @implementation
+ // w/o an @interface case. Sema should just create one for us as it does for
+ // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ std::string ExtName(Interface->getNameAsString() + "_" +
+ OCD->getNameAsString());
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(CGM.getContext()),
+ e = OCD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(CGM.getContext()),
+ e = OCD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
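+ // Fill in the _objc_category structure documented above. The protocol
+ // and property lists are emitted only when a matching category
+ // @interface exists; otherwise they are null.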
+ std::vector<llvm::Constant*> Values(7);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList(std::string("\01L_OBJC_CATEGORY_INSTANCE_METHODS_") +
+ ExtName,
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList(std::string("\01L_OBJC_CATEGORY_CLASS_METHODS_") + ExtName,
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList(std::string("\01L_OBJC_CATEGORY_PROTOCOLS_") + ExtName,
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList(std::string("\01l_OBJC_$_PROP_LIST_") + ExtName,
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(std::string("\01L_OBJC_CATEGORY_")+ExtName, Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+}
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+ // <rdr://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+ eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdr://4923634>
+};
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions (<rdr://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+ */
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getNameAsString(),
+ Interface->protocol_begin(),
+ Interface->protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+
+ // FIXME: Set CXX-structors flag.
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(CGM.getContext()),
+ e = ID->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(CGM.getContext()),
+ e = ID->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
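+ // Also include the accessors generated for @synthesize'd properties,
+ // when definitions for them exist, in the instance method list.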
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(CGM.getContext()),
+ e = ID->propimpl_end(CGM.getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
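+ // Fill in the _objc_class structure documented above; the metaclass
+ // emitted first serves as the isa slot.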
+ std::vector<llvm::Constant*> Values(12);
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(std::string("\01L_OBJC_CLASS_")+ClassName, Init,
+ "__OBJC,__class,regular,no_dead_strip",
+ 4, true);
+ DefinedClasses.push_back(GV);
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ const ConstantVector &Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+ Flags |= eClassFlags_Hidden;
+
+ std::vector<llvm::Constant*> Values(12);
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name,
+ &CGM.getModule());
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ UsedGlobals.push_back(GV);
+
+ return GV;
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+ // FIXME: Should we look these up somewhere other than the module? It's a
+ // bit silly since we only generate these while processing an
+ // implementation, so exactly one pointer would work if we knew when we
+ // entered/exited an implementation block.
+
+ // Check for an existing forward reference.
+ // A metaclass with internal linkage may already have been defined;
+ // pass 'true' as the second argument so that it is still returned.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name,
+ &CGM.getModule());
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getNameAsString(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getNameAsString(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+ */
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars, Ivar(3);
+
+ // When emitting the root class GCC emits ivar entries for the
+ // actual class structure. It is not clear whether we need to follow
+ // this behavior; for now let's try to get away with not doing it. If
+ // we do need it, the cleanest solution would be to make up an
+ // ObjCInterfaceDecl for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ ObjCInterfaceDecl *OID =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ GetNamedIvarList(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ Ivar[0] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[1] = GetMethodVarType(IVD);
+ Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD));
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getNameAsString(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_"
+ + ID->getNameAsString(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ std::string Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ const llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name,
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(const std::string &Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ const llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ UsedGlobals.push_back(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+/*
+
+Objective-C setjmp-longjmp (sjlj) Exception Handling
+--
+
+The basic framework for a @try-catch-finally is as follows:
+{
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+finally_end:
+}
+
+This framework differs slightly from the one gcc uses, in that gcc
+uses _rethrow to determine if objc_exception_try_exit should be called
+and if the object should be rethrown. This breaks in the face of
+throwing nil and introduces unnecessary branches.
+
+We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+Rethrows and Jumps-Through-Finally
+--
+
+Support for implicit rethrows and jumping through the finally block is
+handled by storing the current exception-handling context in
+ObjCEHStack.
+
+In order to implement proper @finally semantics, we support one basic
+mechanism for jumping through the finally block to an arbitrary
+destination. Constructs which generate exits from a @try or @catch
+block use this mechanism to implement the proper semantics by chaining
+jumps, as necessary.
+
+This mechanism works like the one used for indirect goto: we
+arbitrarily assign an ID to each destination and store the ID for the
+destination in a variable prior to entering the finally block. At the
+end of the finally block we simply create a switch to the proper
+destination.
+
+Code gen for @synchronized(expr) stmt;
+Effectively generating code for:
+objc_sync_enter(expr);
+@try stmt @finally { objc_sync_exit(expr); }
+*/
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ // Create various blocks we refer to for handling @finally.
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
+ llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+ // @synchronized are illegal & this will dominate uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ CGF.ObjCEHValueStack.push_back(0);
+
+ // Allocate memory for the exception data and rethrow pointer.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
+ "_rethrow");
+ llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(llvm::Type::Int1Ty,
+ "_call_try_exit");
+ CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(), CallTryExitPtr);
+
+ // Enter a new try block and call setjmp.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
+ "jmpbufarray");
+ JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+
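+ // A zero result from setjmp means normal entry into the @try body; a
+ // nonzero result means an exception was thrown and control returned here
+ // via longjmp, so branch to the handler.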
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
+ TryHandler, TryBlock);
+
+ // Emit the @try block.
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the "exception in @try" block.
+ CGF.EmitBlock(TryHandler);
+
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::Value *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ CGF.ObjCEHValueStack.back() = Caught;
+ if (!isTry) {
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ } else if (const ObjCAtCatchStmt *CatchStmt =
+ cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ // Enter a new exception try block (in case a @catch block throws
+ // an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+
+ llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
+ JmpBufPtr, "result");
+ llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+
+ llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
+
+ const ParmVarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const PointerType *PT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ PT = CatchParam->getType()->getAsPointerType();
+
+ // catch(id e) always matches.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if ((PT && CGF.getContext().isObjCIdStructType(PT->getPointeeType())) ||
+ CatchParam->getType()->isObjCQualifiedIdType())
+ AllMatched = true;
+ }
+
+ if (AllMatched) {
+ if (CatchParam) {
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(PT && "Unexpected non-pointer type in @catch");
+ QualType T = PT->getPointeeType();
+ const ObjCInterfaceType *ObjCType = T->getAsObjCInterfaceType();
+ assert(ObjCType && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, ObjCType->getDecl());
+
+ llvm::Value *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught, CGF.ConvertType(CatchParam->getType()),
+ "tmp");
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ if (!AllMatched) {
+ // None of the handlers caught the exception, so store it to be
+ // rethrown at the end of the @finally block.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+ CGF.Builder.CreateStore(
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData),
+ RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ } else {
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(), CallTryExitPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+
+ // Pop the exception-handling stack entry. It is important to do
+ // this now, because the code in the @finally block is not in this
+ // context.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Emit the @finally block.
+ CGF.EmitBlock(FinallyBlock);
+ llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+
+ CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
+
+ CGF.EmitBlock(FinallyExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
+
+ CGF.EmitBlock(FinallyNoExit);
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ // Emit the switch block
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, "assignivar");
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdr://4810609&4810587&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0), // FIXME: Not sure what
+ // this implies.
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+ eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set?
+
+ // A flag indicating that the module has no instances of an
+ // @synthesize of a superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
+
+void CGObjCMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ unsigned flags = 0;
+
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+
+ // We never allow @synthesize of a superclass property.
+ flags |= eImageInfo_CorrectedSynthesize;
+
+ // Emitted as int[2];
+ llvm::Constant *values[2] = {
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, version),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, flags)
+ };
+ llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::Int32Ty, 2);
+
+ const char *Section;
+ if (ObjCABI == 1)
+ Section = "__OBJC, __image_info,regular";
+ else
+ Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
+ llvm::ConstantArray::get(AT, values, 2),
+ Section,
+ 0,
+ true);
+ GV->setConstant(true);
+}
+
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ std::vector<llvm::Constant*> Values(4);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ // This used to be the filename; now it is unused. <rdr://4327263>
+ Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
+ Values[3] = EmitModuleSymbols();
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
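+ // The symtab record: selector reference count and refs pointer (both
+ // unused here), the class and category counts, and the combined
+ // definitions array emitted below.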
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses + NumCategories),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
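+/// EmitClassRef - Return a Value holding the class object for the given
+/// interface, loaded through a lazily created entry in the __cls_refs
+/// section that the runtime rewrites to the real class pointer.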
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ LazySymbols.insert(ID->getIdentifier());
+
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(Ident->getName()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
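+/// GetGCAttrTypeForType - Classify a type for GC layout purposes:
+/// __strong-qualified types and Objective-C object pointers are strong,
+/// __weak-qualified types are weak, and C pointers are classified by
+/// their pointee type; everything else is GCNone.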
+static QualType::GCAttrTypes GetGCAttrTypeForType(ASTContext &Ctx,
+ QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return QualType::Strong;
+
+ if (FQT.isObjCGCWeak())
+ return QualType::Weak;
+
+ if (Ctx.isObjCObjectPointerType(FQT))
+ return QualType::Strong;
+
+ if (const PointerType *PT = FQT->getAsPointerType())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return QualType::GCNone;
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ llvm::SmallVector<FieldDecl*, 16> Fields(RD->field_begin(CGM.getContext()),
+ RD->field_end(CGM.getContext()));
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ FieldDecl *MaxField = 0;
+ FieldDecl *MaxSkippedField = 0;
+ FieldDecl *LastFieldBitfield = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOffset = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
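+ // Walk the fields, recording the byte position and size of each slot
+ // that must be scanned in IvarsInfo and of each slot that must be
+ // skipped in SkipIvars; for unions only the largest member on each side
+ // is recorded, after the loop.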
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD)
+ FieldOffset =
+ Layout->getElementOffset(CGM.getTypes().getLLVMFieldNo(Field));
+ else
+ FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+
+ // Skip over unnamed fields and bitfields.
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfield = Field;
+ LastBitfieldOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfield = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAsRecordType(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ assert(CArray && "only array with known element size is supported");
+ uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType()) {
+ int OldIndex = IvarsInfo.size() - 1;
+ int OldSkIndex = SkipIvars.size() -1;
+
+ const RecordType *RT = FQT->getAsRecordType();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+ FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+ // At this point we are done with records, unions, and arrays thereof.
+ // For any other array we are now down to its element type.
+ QualType::GCAttrTypes GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == QualType::Strong)
+ || (!ForStrongLayout && GCAttr == QualType::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == QualType::GCNone || GCAttr == QualType::Weak))
+ || (!ForStrongLayout && GCAttr != QualType::Weak)) {
+ if (IsUnion) {
+ // FIXME: Why the asymmetry? We divide by word size in bits on the other
+ // side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+ // FIXME: Why the asymmetry? We divide by byte size in bits here.
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfield) {
+ // Last field was a bitfield. Must update skip info.
+ Expr *BitWidth = LastFieldBitfield->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map displays which words in the ivar list must be skipped
+/// and which must be scanned by GC (see below). The string is built of bytes.
+/// Each byte is divided into two nibbles (4 bits each). The left nibble is
+/// the count of words to skip and the right nibble is the count of words to
+/// scan. So, each nibble represents up to 15 words to skip or scan. Skipping
+/// the rest is represented by a 0x00 byte, which also ends the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+/// - __weak anything
+///
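+/// For example (illustrative): a strong layout of "scan 2 words, skip 3
+/// words, scan 4 words" is emitted as the bytes 0x02, 0x34, 0x00.
+///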
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ unsigned int WordsToScan, WordsToSkip;
+ const llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ CGM.getContext().CollectObjCIvars(OI, RecFields);
+
+ // Add this implementation's synthesized ivars.
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().CollectSynthesizedIvars(OI, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ // Sort on byte position in case we encountered a union nested in
+ // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ // Build the string of skip/scan nibbles
+ llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ if (IvarsInfo[0].ivar_bytepos == 0) {
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ } else {
+ WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ }
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+ // Skip over GC-able object pointers which lie over each other.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ bool BytesSkipped = false;
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ BytesSkipped = (LastByteSkipped > LastByteScanned);
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (BytesSkipped) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+ // Mini optimization of nibbles such that a 0xM0 followed by a 0x0N is
+ // produced as 0xMN.
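+ // For example (illustrative), a {skip=3, scan=0} pair followed by a
+ // {skip=0, scan=2} pair collapses into {skip=3, scan=2}, which later emits
+ // the single byte 0x32 instead of 0x30 followed by 0x02.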
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
+
+ // Generate the string.
+ std::string BitMap;
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
+
+ if (skip_small > 0 || skip_big > 0)
+ BytesSkipped = true;
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getNameAsCString());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0; i < BitMap.size(); i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+ // If the ivar_layout bitmap is all 1 bits (nothing skipped), then use NULL
+ // as the final layout.
+ if (ForStrongLayout && !BytesSkipped)
+ return llvm::Constant::getNullValue(PtrTy);
+ llvm::GlobalVariable * Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantArray::get(BitMap.c_str()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+ return getConstantGEP(Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string copying.
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantArray::get(Sel.getAsString()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(const std::string &Name) {
+ return GetMethodVarName(&CGM.getContext().Idents.get(Name));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
+ TypeStr);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantArray::get(TypeStr),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantArray::get(Ident->getName()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+ CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
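+/// GetNameForMethod - Build the mangled symbol name for a method
+/// implementation; for example (illustrative), an instance method "count"
+/// defined in category MyAdditions of class NSArray yields the name
+/// "\01-[NSArray(MyAdditions) count]".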
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ std::string &NameOut) {
+ NameOut = '\01';
+ NameOut += (D->isInstanceMethod() ? '-' : '+');
+ NameOut += '[';
+ assert (CD && "Missing container decl in GetNameForMethod");
+ NameOut += CD->getNameAsString();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext())) {
+ NameOut += '(';
+ NameOut += CID->getNameAsString();
+ NameOut+= ')';
+ }
+ NameOut += ' ';
+ NameOut += D->getSelector().getAsString();
+ NameOut += ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ i = Protocols.begin(), e = Protocols.end(); i != e; ++i) {
+ if (i->second->hasInitializer())
+ continue;
+
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(i->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ i->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ i->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ }
+
+ std::vector<llvm::Constant*> Used;
+ for (std::vector<llvm::GlobalVariable*>::iterator i = UsedGlobals.begin(),
+ e = UsedGlobals.end(); i != e; ++i) {
+ Used.push_back(llvm::ConstantExpr::getBitCast(*i, ObjCTypes.Int8PtrTy));
+ }
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy, Used.size());
+ llvm::GlobalValue *GV =
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Used),
+ "llvm.used",
+ &CGM.getModule());
+
+ GV->setSection("llvm.metadata");
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+
+ // FIXME: Uh, this isn't particularly portable.
+ std::stringstream s;
+
+ if (!CGM.getModule().getModuleInlineAsm().empty())
+ s << "\n";
+
+ for (std::set<IdentifierInfo*>::iterator i = LazySymbols.begin(),
+ e = LazySymbols.end(); i != e; ++i) {
+ s << "\t.lazy_reference .objc_class_name_" << (*i)->getName() << "\n";
+ }
+ for (std::set<IdentifierInfo*>::iterator i = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); i != e; ++i) {
+ s << "\t.objc_class_name_" << (*i)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*i)->getName() << "\n";
+ }
+
+ CGM.getModule().appendModuleInlineAsm(s.str());
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm)
+{
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+: CGM(cgm)
+{
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // FIXME: It would be nice to unify this with the opaque type, so that the IR
+ // comes out a bit cleaner.
+ const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, false));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, false));
+ RD->completeDefinition(Ctx);
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::get(Int8PtrTy, Int8PtrTy, NULL);
+ CGM.getModule().addTypeName("struct._prop_t",
+ PropertyTy);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(PropertyTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._prop_list_t",
+ PropertyListTy);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::get(SelectorPtrTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+
+ // struct _objc_cache *
+ CacheTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm)
+{
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::get(SelectorPtrTy,
+ Int8PtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description",
+ MethodDescriptionTy);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::get(IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_method_description_list",
+ MethodDescriptionListTy);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::get(IntTy,
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_extension",
+ ProtocolExtensionTy);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get();
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get();
+
+ const llvm::Type *T =
+ llvm::StructType::get(llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTyHolder, 0),
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ T = llvm::StructType::get(ProtocolExtensionPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTyHolder),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
+
+ ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListTy);
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::get(Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+
+ // struct _objc_ivar_list *
+ IvarListTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy = llvm::OpaqueType::get();
+ CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::get(IntTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get();
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ T = llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
+
+ ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
+ CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+ // struct _objc_category {
+ // char *category_name;
+ // char *class_name;
+ // struct _objc_method_list *instance_methods;
+ // struct _objc_method_list *class_methods;
+ // struct _objc_protocol_list *protocols;
+ // uint32_t size; // sizeof(struct _objc_category)
+ // struct _objc_property_list *instance_properties; // category's @property
+ // }
+ CategoryTy = llvm::StructType::get(Int8PtrTy,
+ Int8PtrTy,
+ MethodListPtrTy,
+ MethodListPtrTy,
+ ProtocolListPtrTy,
+ IntTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy = llvm::StructType::get(LongTy,
+ SelectorPtrTy,
+ ShortTy,
+ ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::get(LongTy,
+ LongTy,
+ Int8PtrTy,
+ SymtabPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ const llvm::Type *StackPtrTy =
+ llvm::ArrayType::get(llvm::PointerType::getUnqual(llvm::Type::Int8Ty), 4);
+
+ ExceptionDataTy =
+ llvm::StructType::get(llvm::ArrayType::get(llvm::Type::Int32Ty,
+ SetJmpBufferSize),
+ StackPtrTy, NULL);
+ CGM.getModule().addTypeName("struct._objc_exception_data",
+ ExceptionDataTy);
+
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+: ObjCCommonTypesHelper(cgm)
+{
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(MethodTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct.__method_list_t",
+ MethodListnfABITy);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // }
+
+ // Holder for struct _protocol_list_t *
+ llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get();
+
+ ProtocolnfABITy = llvm::StructType::get(ObjectPtrTy,
+ Int8PtrTy,
+ llvm::PointerType::getUnqual(
+ ProtocolListTyHolder),
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ PropertyListPtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._protocol_t",
+ ProtocolnfABITy);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy = llvm::StructType::get(LongTy,
+ llvm::ArrayType::get(
+ ProtocolnfABIPtrTy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_protocol_list",
+ ProtocolListnfABITy);
+ cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
+ ProtocolListnfABITy);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy = llvm::StructType::get(llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy,
+ Int8PtrTy,
+ IntTy,
+ IntTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);
+
+ // struct _ivar_list_t {
+ // uint32 entsize; // sizeof(struct _ivar_t)
+ // uint32 count;
+ // struct _ivar_t list[count];
+ // }
+ IvarListnfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ llvm::ArrayType::get(
+ IvarnfABITy, 0),
+ NULL);
+ CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::get(IntTy,
+ IntTy,
+ IntTy,
+ Int8PtrTy,
+ Int8PtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._class_ro_t",
+ ClassRonfABITy);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(ObjectPtrTy);
+ Params.push_back(SelectorPtrTy);
+ ImpnfABITy = llvm::PointerType::getUnqual(
+ llvm::FunctionType::get(ObjectPtrTy, Params, false));
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get();
+ ClassnfABITy = llvm::StructType::get(llvm::PointerType::getUnqual(ClassTyHolder),
+ llvm::PointerType::getUnqual(ClassTyHolder),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(
+ ClassRonfABITy),
+ NULL);
+ CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);
+
+ cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
+ ClassnfABITy);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::get(Int8PtrTy,
+ ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+ SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, false));
+ RD->addDecl(Ctx, FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, false));
+ RD->completeDefinition(Ctx);
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy = llvm::StructType::get(ImpnfABITy,
+ SelectorPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy = llvm::StructType::get(llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy,
+ ClassnfABIPtrTy,
+ NULL);
+ CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::AddModuleClassList(const
+ std::vector<llvm::GlobalValue*>
+ &Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ std::vector<llvm::Constant*> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant* Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ NumClasses),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName,
+ &CGM.getModule());
+ GV->setAlignment(8);
+ GV->setSection(SectionName);
+ UsedGlobals.push_back(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+ // The non-fragile ABI has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ // static int L_OBJC_IMAGE_INFO[2] = { 0, flags };
+ // FIXME. flags can be 0 | 1 | 2 | 6. For now just use 0
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, 0);
+ unsigned int flags = 0;
+ // FIXME: Fix and continue?
+ if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+ flags |= eImageInfo_GarbageCollected;
+ if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ flags |= eImageInfo_GCOnly;
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ llvm::Constant* Init = llvm::ConstantArray::get(
+ llvm::ArrayType::get(ObjCTypes.IntTy, 2),
+ Values);
+ llvm::GlobalVariable *IMGV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_IMAGE_INFO",
+ &CGM.getModule());
+ IMGV->setSection("__DATA, __objc_imageinfo, regular, no_dead_strip");
+ IMGV->setConstant(true);
+ UsedGlobals.push_back(IMGV);
+
+ std::vector<llvm::Constant*> Used;
+
+ for (std::vector<llvm::GlobalVariable*>::iterator i = UsedGlobals.begin(),
+ e = UsedGlobals.end(); i != e; ++i) {
+ Used.push_back(llvm::ConstantExpr::getBitCast(*i, ObjCTypes.Int8PtrTy));
+ }
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy, Used.size());
+ llvm::GlobalValue *GV =
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Used),
+ "llvm.used",
+ &CGM.getModule());
+
+ GV->setSection("llvm.metadata");
+
+}
+
+/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+/// NonLegacyDispatchMethods; false otherwise. What this means is that,
+/// except for the 19 selectors in the list, we generate a 32bit-style
+/// message dispatch call for all the rest.
+///
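+/// For example (illustrative), a send of "retain" (which is in the list)
+/// goes through the new non-fragile dispatch mechanism, while a send of an
+/// arbitrary selector such as "doSomething:" uses the 32bit-style dispatch.
+///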
+bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+ if (NonLegacyDispatchMethods.empty()) {
+ NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
+ NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
+
+ NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
+ NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ NonLegacyDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ return (NonLegacyDispatchMethods.count(Sel) == 0);
+}
+
+// Metadata flags
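+// (Illustrative) these flags combine bitwise; e.g. the metaclass of a hidden
+// root class is emitted with CLS_META | CLS_ROOT | OBJC2_CLS_HIDDEN.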
+enum MetaDataDlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20
+};
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+ // FIXME. For 64bit targets add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(CGM.getContext()),
+ e = ID->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(CGM.getContext()),
+ e = ID->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(CGM.getContext()),
+ e = ID->propimpl_end(CGM.getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getNameAsString(),
+ OID->protocol_begin(),
+ OID->protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] =
+ EmitPropertyList(
+ "\01l_OBJC_$_PROP_LIST_" + ID->getNameAsString(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName,
+ &CGM.getModule());
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName for:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ std::vector<llvm::Constant*> Values(5);
+ Values[0] = IsAGV;
+ Values[1] = SuperClassGV
+ ? SuperClassGV
+ : llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ Values[2] = ObjCEmptyCacheVar; // &ObjCEmptyCacheVar
+ Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
+ Values[4] = ClassRoGV; // &CLASS_RO_GV
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(CGM.getContext(), GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = llvm::RoundUpToAlignment(RL.getNextOffset(), 8) / 8;
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / 8;
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache",
+ &CGM.getModule());
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable",
+ &CGM.getModule());
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+ // Build the superclass metadata symbol name.
+ std::string SuperClassName =
+ ObjCMetaClassName + ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (hasObjCExceptionAttribute(ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+ std::string RootClassName =
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+ // This routine is called for @protocol only. So, we must build the
+ // definition of the protocol's meta-data (not a reference to it!).
+ //
+ llvm::Constant *Init = llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.ExternalProtocolPtrTy);
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getNameAsCString();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV, false, "tmp");
+ PTGV = new llvm::GlobalVariable(
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName,
+ &CGM.getModule());
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ UsedGlobals.push_back(PTGV);
+ return Builder.CreateLoad(PTGV, false, "tmp");
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
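+/// For example (illustrative), a category MyAdditions on class NSArray is
+/// emitted as the global \01l_OBJC_$_CATEGORY_NSArray_$_MyAdditions.
+///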
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ std::vector<llvm::Constant*> Values(6);
+ Values[0] = GetClassName(OCD->getIdentifier());
+ // class entry symbol (the class this category extends)
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(CGM.getContext()),
+ e = OCD->instmeth_end(CGM.getContext()); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(CGM.getContext()),
+ e = OCD->classmeth_end(CGM.getContext()); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ std::string ExtName(Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString());
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getNameAsString() + "_$_"
+ + Category->getNameAsString(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] =
+ EmitPropertyList(std::string("\01l_OBJC_$_PROP_LIST_") + ExtName,
+ OCD, Category, ObjCTypes);
+ }
+ else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName,
+ &CGM.getModule());
+ GCATV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ UsedGlobals.push_back(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ // FIXME: Use DenseMap::lookup
+ llvm::Function *Fn = MethodDefinitions[MD];
+ if (!Fn)
+ return 0;
+
+ std::vector<llvm::Constant*> Method(3);
+ Method[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Method[1] = GetMethodVarType(MD);
+ Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(
+ const std::string &Name,
+ const char *Section,
+ const ConstantVector &Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ std::vector<llvm::Constant*> Values(3);
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
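+/// For example (illustrative), an ivar "_count" found in interface NSArray
+/// yields the global name "OBJC_IVAR_$_NSArray._count".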
+llvm::GlobalVariable * CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ // FIXME: We shouldn't need to do this lookup.
+ unsigned Index;
+ const ObjCInterfaceDecl *Container =
+ FindIvarInterface(CGM.getContext(), ID, Ivar, Index);
+ assert(Container && "Unable to find ivar container!");
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name,
+ &CGM.getModule());
+ return IvarOffsetGV;
+}
+
+llvm::Constant * CGObjCNonFragileABIMac::EmitIvarOffsetVar(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.LongTy));
+
+ // FIXME: This matches gcc, but shouldn't the visibility be set on the use
+ // as well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_const");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+///
+
+void CGObjCCommonMac::GetNamedIvarList(const ObjCInterfaceDecl *OID,
+ llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const {
+ for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
+ E = OID->ivar_end(); I != E; ++I) {
+ // Ignore unnamed bit-fields.
+ if (!(*I)->getDeclName())
+ continue;
+
+ Res.push_back(*I);
+ }
+
+ // Also save synthesized ivars.
+ // FIXME: Why can't we just use the passed-in Res small vector?
+ llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+ CGM.getContext().CollectSynthesizedIvars(OID, Ivars);
+ for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+ Res.push_back(Ivars[k]);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars, Ivar(5);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ // Collect declared and synthesized ivars in a small vector.
+ llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+ GetNamedIvarList(OID, OIvars);
+
+ for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+ ObjCIvarDecl *IVD = OIvars[i];
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ const llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ // NOTE. Size of a bitfield does not match gcc's, because of the
+ // way bitfields are treated specially in each. But I am told that
+ // 'size' for bitfield ivars is ignored by the runtime so it does
+ // not matter. If it matters, there is enough info to get the
+ // bitfield right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ std::vector<llvm::Constant*> Values(3);
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getNameAsString(),
+ &CGM.getModule());
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.IvarListnfABIPtrTy);
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getNameAsString(),
+ &CGM.getModule());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ const char *ProtocolName = PD->getNameAsCString();
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(CGM.getContext()),
+ e = PD->instmeth_end(CGM.getContext());
+ i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ } else {
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(CGM.getContext()),
+ e = PD->classmeth_end(CGM.getContext());
+ i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ } else {
+ ClassMethods.push_back(C);
+ }
+ }
+
+ std::vector<llvm::Constant*> Values(10);
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList(
+ "\01l_OBJC_$_PROTOCOL_REFS_" + PD->getNameAsString(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getNameAsString(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getNameAsString(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ std::string("\01l_OBJC_PROTOCOL_$_")+ProtocolName,
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ // Use this protocol meta-data to build protocol list table in section
+ // __DATA, __objc_protolist
+ llvm::GlobalVariable *PTGV = new llvm::GlobalVariable(
+ ObjCTypes.ProtocolnfABIPtrTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Entry,
+ std::string("\01l_OBJC_LABEL_PROTOCOL_$_")
+ +ProtocolName,
+ &CGM.getModule());
+ PTGV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ UsedGlobals.push_back(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t *list[protocol_count];
+/// }
+/// @endcode
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(const std::string &Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ std::vector<llvm::Constant*> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ GV = new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+ UsedGlobals.push_back(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ std::vector<llvm::Constant*> Desc(3);
+ Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a nonfragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar)
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),
+ false, "ivar");
+}
+
+CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs) {
+ // FIXME: Even though IsSuper is passed in, this function does not handle
+ // calls to 'super' receivers.
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Value *Arg0 = Receiver;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+
+ // Find the message function name.
+ // FIXME. This is too much work to get the ABI-specific result type needed to
+ // find the message name.
+ const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType,
+ llvm::SmallVector<QualType, 16>());
+ llvm::Constant *Fn = 0;
+ std::string Name("\01l_");
+ if (CGM.ReturnTypeUsesSret(FnInfo)) {
+#if 0
+ // Unlike what is documented, gcc never generates this API.
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdStretFixupFn();
+ // FIXME: Is there a better way of getting these names? They are
+ // available in the RuntimeFunctions vector pair.
+ Name += "objc_msgSendId_stret_fixup";
+ }
+ else
+#endif
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ Name += "objc_msgSendSuper2_stret_fixup";
+ }
+ else
+ {
+ Fn = ObjCTypes.getMessageSendStretFixupFn();
+ Name += "objc_msgSend_stret_fixup";
+ }
+ }
+ else if (!IsSuper && ResultType->isFloatingType()) {
+ if (const BuiltinType *BT = ResultType->getAsBuiltinType()) {
+ BuiltinType::Kind k = BT->getKind();
+ if (k == BuiltinType::LongDouble) {
+ Fn = ObjCTypes.getMessageSendFpretFixupFn();
+ Name += "objc_msgSend_fpret_fixup";
+ }
+ else {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ }
+ }
+ else {
+#if 0
+// Unlike what is documented, gcc never generates this API.
+ if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
+ Fn = ObjCTypes.getMessageSendIdFixupFn();
+ Name += "objc_msgSendId_fixup";
+ }
+ else
+#endif
+ if (IsSuper) {
+ Fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ Name += "objc_msgSendSuper2_fixup";
+ }
+ else
+ {
+ Fn = ObjCTypes.getMessageSendFixupFn();
+ Name += "objc_msgSend_fixup";
+ }
+ }
+ assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
+ Name += '_';
+ std::string SelName(Sel.getAsString());
+ // Replace all ':' in the selector name with '_'. Ouch!
+ for(unsigned i = 0; i < SelName.size(); i++)
+ if (SelName[i] == ':')
+ SelName[i] = '_';
+ Name += SelName;
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
+ // Build message ref table entry.
+ std::vector<llvm::Constant*> Values(2);
+ Values[0] = Fn;
+ Values[1] = GetMethodVarName(Sel);
+ llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+ GV = new llvm::GlobalVariable(Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ Name,
+ &CGM.getModule());
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setAlignment(16);
+ GV->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+ llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);
+
+ CallArgList ActualArgs;
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+ ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
+ ObjCTypes.MessageRefCPtrTy));
+ ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+ const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs);
+ llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
+ Callee = CGF.Builder.CreateLoad(Callee);
+ const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
+ Callee = CGF.Builder.CreateBitCast(Callee,
+ llvm::PointerType::getUnqual(FTy));
+ return CGF.EmitCall(FnInfo1, Callee, ActualArgs);
+}
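+
+/// For reference, a rough C-level sketch of what the fixup-based dispatch
+/// emitted above amounts to. The names and the message_ref layout are
+/// illustrative only, mirroring the two-field {messenger, selector-name}
+/// entry this function builds into __DATA, __objc_msgrefs:
+/// @code
+/// struct message_ref_t { void *messenger; const char *sel_name; };
+/// static struct message_ref_t MREF = { objc_msgSend_fixup, "foo:" };
+/// typedef id (*MSG_FN)(id, struct message_ref_t *, ...);
+/// result = ((MSG_FN)MREF.messenger)(receiver, &MREF, arg);
+/// @endcode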
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue CGObjCNonFragileABIMac::GenerateMessageSend(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ return LegacyDispatchedSelector(Sel)
+ ? EmitLegacyMessageSend(CGF, ResultType, EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, ObjCTypes)
+ : EmitMessageSend(CGF, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, Name, &CGM.getModule());
+ }
+
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+/// EmitMetaClassRef - Return a Value * of the address of _class_t
+/// meta-data
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry, false, "tmp");
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_",
+ &CGM.getModule());
+ Entry->setAlignment(
+ CGM.getTargetData().getPrefTypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // Message sent to 'super' in a class method defined in
+ // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ }
+ else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ }
+ else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ const llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (LegacyDispatchedSelector(Sel))
+ ? EmitLegacyMessageSend(CGF, ResultType,EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs,
+ ObjCTypes)
+ : EmitMessageSend(CGF, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_",
+ &CGM.getModule());
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ UsedGlobals.push_back(Entry);
+ }
+
+ return Builder.CreateLoad(Entry, false, "tmp");
+}
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, "assignivar");
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj)
+{
+ const llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst)
+{
+ const llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ return;
+}
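+
+/// A sketch (illustrative, assuming -fobjc-gc semantics) of the source-level
+/// assignments that reach the four write barriers emitted above:
+/// @code
+/// __weak id w;  w = obj;      // objc_assign_weak(obj, &w)
+/// id g;         g = obj;      // objc_assign_global(obj, &g), g at file scope
+/// self->ivar = obj;           // objc_assign_ivar(obj, &self->ivar)
+/// *(__strong id *)p = obj;    // objc_assign_strongCast(obj, (id *)p)
+/// @endcode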
+
+void
+CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+ llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+ llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can safely avoid a temp here because jumps into
+ // @synchronized are illegal & this will dominate uses.
+ llvm::Value *SyncArg = 0;
+ if (!isTry) {
+ SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ }
+
+ // Push an EH context entry, used for handling rethrows and jumps
+ // through finally.
+ CGF.PushCleanupBlock(FinallyBlock);
+
+ CGF.setInvokeDest(TryHandler);
+
+ CGF.EmitBlock(TryBlock);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ // Emit the exception handler.
+
+ CGF.EmitBlock(TryHandler);
+
+ llvm::Value *llvm_eh_exception =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector_i64 =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64);
+ llvm::Value *llvm_eh_typeid_for_i64 =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64);
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+ SelectorArgs.push_back(Exc);
+ SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
+
+ // Construct the lists of (type, catch body) to handle.
+ llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+ bool HasCatchAll = false;
+ if (isTry) {
+ if (const ObjCAtCatchStmt* CatchStmt =
+ cast<ObjCAtTryStmt>(S).getCatchStmts()) {
+ for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+ const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+ Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+ // catch(...) always matches.
+ if (!CatchDecl) {
+ // Use i8* null here to signal this is a catch all, not a cleanup.
+ llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ SelectorArgs.push_back(Null);
+ HasCatchAll = true;
+ break;
+ }
+
+ if (CGF.getContext().isObjCIdType(CatchDecl->getType()) ||
+ CatchDecl->getType()->isObjCQualifiedIdType()) {
+ llvm::Value *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id", &CGM.getModule());
+ SelectorArgs.push_back(IDEHType);
+ HasCatchAll = true;
+ break;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const PointerType *PT = CatchDecl->getType()->getAsPointerType();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT =
+ PT->getPointeeType()->getAsObjCInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
+ SelectorArgs.push_back(EHType);
+ }
+ }
+ }
+
+ // We use a cleanup unless there was already a catch all.
+ if (!HasCatchAll) {
+ SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ }
+
+ llvm::Value *Selector =
+ CGF.Builder.CreateCall(llvm_eh_selector_i64,
+ SelectorArgs.begin(), SelectorArgs.end(),
+ "selector");
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ const ParmVarDecl *CatchParam = Handlers[i].first;
+ const Stmt *CatchBody = Handlers[i].second;
+
+ llvm::BasicBlock *Next = 0;
+
+ // The last handler always matches.
+ if (i + 1 != e) {
+ assert(CatchParam && "Only last handler can be a catch all.");
+
+ llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+ Next = CGF.createBasicBlock("catch.next");
+ llvm::Value *Id =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for_i64,
+ CGF.Builder.CreateBitCast(SelectorArgs[i+2],
+ ObjCTypes.Int8PtrTy));
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
+ Match, Next);
+
+ CGF.EmitBlock(Match);
+ }
+
+ if (CatchBody) {
+ llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
+ llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
+
+ // Cleanups must call objc_end_catch.
+ //
+ // FIXME: It seems incorrect for objc_begin_catch to be inside this
+ // context, but this matches gcc.
+ CGF.PushCleanupBlock(MatchEnd);
+ CGF.setInvokeDest(MatchHandler);
+
+ llvm::Value *ExcObject =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
+
+ // Bind the catch parameter if it exists.
+ if (CatchParam) {
+ ExcObject =
+ CGF.Builder.CreateBitCast(ExcObject,
+ CGF.ConvertType(CatchParam->getType()));
+ // CatchParam is a ParmVarDecl because of the grammar
+ // construction used to handle this, but for codegen purposes
+ // we treat this as a local decl.
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.ObjCEHValueStack.push_back(ExcObject);
+ CGF.EmitStmt(CatchBody);
+ CGF.ObjCEHValueStack.pop_back();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(MatchHandler);
+
+ llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ llvm::SmallVector<llvm::Value*, 8> Args;
+ Args.push_back(Exc);
+ Args.push_back(ObjCTypes.getEHPersonalityPtr());
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 0));
+ CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end());
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+ CGF.EmitBlock(MatchEnd);
+
+ // Unfortunately, we also have to generate another EH frame here
+ // in case this throws.
+ llvm::BasicBlock *MatchEndHandler =
+ CGF.createBasicBlock("match.end.handler");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
+ Cont, MatchEndHandler,
+ Args.begin(), Args.begin());
+
+ CGF.EmitBlock(Cont);
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ CGF.EmitBlock(MatchEndHandler);
+ Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+ // We are required to emit this call to satisfy LLVM, even
+ // though we don't use the result.
+ Args.clear();
+ Args.push_back(Exc);
+ Args.push_back(ObjCTypes.getEHPersonalityPtr());
+ Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ 0));
+ CGF.Builder.CreateCall(llvm_eh_selector_i64, Args.begin(), Args.end());
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (Next)
+ CGF.EmitBlock(Next);
+ } else {
+ assert(!Next && "catch-all should be the last handler.");
+
+ CGF.Builder.CreateStore(Exc, RethrowPtr);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Pop the cleanup entry, the @finally is outside this cleanup
+ // scope.
+ CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ CGF.setInvokeDest(PrevLandingPad);
+
+ CGF.EmitBlock(FinallyBlock);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+ } else {
+ // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ }
+
+ if (Info.SwitchBlock)
+ CGF.EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ CGF.EmitBlock(Info.EndBlock);
+
+ // Branch around the rethrow code.
+ CGF.EmitBranch(FinallyEnd);
+
+ CGF.EmitBlock(FinallyRethrow);
+ CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr));
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(FinallyEnd);
+}
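+
+/// A sketch of the source forms handled above; the comments name the basic
+/// blocks created by this function (assumed mapping, for orientation only):
+/// @code
+/// @try { ... }               // "try", unwinds to "try.handler"
+/// @catch (Foo *f) { ... }    // "match" / "catch.next" per typed handler
+/// @catch (...) { ... }       // catch-all, ends the selector list
+/// @finally { ... }           // "finally", then "finally.end" or "finally.throw"
+///
+/// @synchronized (expr) {     // objc_sync_enter(expr) on entry,
+///   ...                      // objc_sync_exit(expr) on the finally path
+/// }
+/// @endcode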
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *Exception;
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ Exception = CGF.EmitScalarExpr(ThrowExpr);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ Exception = CGF.ObjCEHValueStack.back();
+ }
+
+ llvm::Value *ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+ llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+ if (InvokeDest) {
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
+ Cont, InvokeDest,
+ &ExceptionAsObject, &ExceptionAsObject + 1);
+ CGF.EmitBlock(Cont);
+ } else
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+ // If we don't need a definition, return the entry if found or check
+ // if we use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(ID))
+ return Entry =
+ new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ (std::string("OBJC_EHTYPE_$_") +
+ ID->getIdentifier()->getName()),
+ &CGM.getModule());
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(ObjCTypes.Int8PtrTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName, &CGM.getModule());
+
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 2);
+
+ std::vector<llvm::Constant*> Values(3);
+ Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+ Values[1] = GetClassName(ID->getIdentifier());
+ Values[2] = GetClassGlobal(ClassName);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ (std::string("OBJC_EHTYPE_$_") +
+ ID->getIdentifier()->getName()),
+ &CGM.getModule());
+ }
+
+ if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(8);
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
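+
+/// For reference, the attribute consulted by hasObjCExceptionAttribute above
+/// is spelled in source roughly as follows (illustrative declaration):
+/// @code
+/// __attribute__((objc_exception))
+/// @interface NSException : NSObject
+/// @end
+/// @endcode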
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCMac(CGM);
+}
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ return new CGObjCNonFragileABIMac(CGM);
+}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..b8cf026
--- /dev/null
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,206 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBJCRUNTIME_H
+#define CLANG_CODEGEN_OBJCRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/DeclObjC.h"
+#include <string>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+// FIXME: Several methods should be pure virtual but are not, to avoid
+// breaking the partially-implemented subclass.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+public:
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+protected:
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+ /// bit-fields is carefully coordinated by these two; use caution!
+ ///
+ /// The latter overload is suitable for computing the offset of a
+ /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+ /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Generate an Objective-C message send operation.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method=0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+ // FIXME: Currently this just generates the Function definition, but really this
+ // should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+};
+
+/// Creates an instance of an Objective-C runtime class.
+//TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
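+
+// A sketch of how a caller might pick among these factories (hypothetical
+// call site; the flag names are assumed from LangOptions, and the real
+// selection lives in the CodeGen module setup):
+//
+//   CGObjCRuntime *RT = LangOpts.NeXTRuntime
+//       ? (LangOpts.ObjCNonFragileABI ? CreateMacNonFragileABIObjCRuntime(CGM)
+//                                     : CreateMacObjCRuntime(CGM))
+//       : CreateGNUObjCRuntime(CGM);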
+}
+}
+#endif
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..b67996c
--- /dev/null
+++ b/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1022 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(S->getLocStart());
+ DI->EmitStopPoint(CurFn, Builder);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // Check if we can handle this without bothering to generate an
+ // insert point or debug info.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // If we happen to be at an unreachable point just create a dummy
+ // basic block to hold the code. We could change parts of irgen to
+ // simply not generate this code, but this situation is rare and
+ // probably not worth the effort.
+ // FIXME: Verify previous performance/effort claim.
+ EnsureInsertPoint();
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ default:
+ // Must be an expression in a stmt context. Emit the value (to get
+ // side-effects) and ignore the result.
+ if (const Expr *E = dyn_cast<Expr>(S)) {
+ EmitAnyExpr(E, 0, false, true);
+ } else {
+ ErrorUnsupported(S, "statement");
+ }
+ break;
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtFinallyStmtClass:
+ assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
+ break;
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ llvm::Value *AggLoc, bool isAggVol) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ EnsureInsertPoint();
+ DI->setLocation(S.getLBracLoc());
+ // FIXME: The llvm backend is currently not ready to deal with region_end
+ // for block scoping. In the presence of always_inline functions it gets so
+ // confused that it doesn't emit any debug info. Just disable this for now.
+ //DI->EmitRegionStart(CurFn, Builder);
+ }
+
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+ bool OldDidCallStackSave = DidCallStackSave;
+ DidCallStackSave = false;
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI) {
+ EnsureInsertPoint();
+ DI->setLocation(S.getRBracLoc());
+
+ // FIXME: The llvm backend is currently not ready to deal with region_end
+ // for block scoping. In the presence of always_inline functions it gets so
+ // confused that it doesn't emit any debug info. Just disable this for now.
+ //DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(*LS);
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+ }
+
+ DidCallStackSave = OldDidCallStackSave;
+
+ EmitCleanupBlocks(CleanupStackDepth);
+
+ return RV;
+}
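+
+// A small example of the GetLast path above (GNU statement expression); the
+// trailing label is why this code walks through LabelStmts before evaluating
+// the final expression (illustrative):
+//
+//   int x = ({ int y = f(); done: y + 1; });   // value of the ({...}) is y + 1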
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+ // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!CleanupEntries.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // If necessary, associate the block with the cleanup stack size.
+ if (!CleanupEntries.empty()) {
+ // Check if the basic block has already been inserted.
+ BlockScopeMap::iterator I = BlockScopes.find(BB);
+ if (I != BlockScopes.end()) {
+ assert(I->second == CleanupEntries.size() - 1);
+ } else {
+ BlockScopes[BB] = CleanupEntries.size() - 1;
+ CleanupEntries.back().Blocks.push_back(BB);
+ }
+ }
+
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitLabel(const LabelStmt &S) {
+ EmitBlock(getBasicBlockForLabel(&S));
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+}
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ // Emit initial switch which will be patched up later by
+ // EmitIndirectSwitches(). We need a default dest, so we use the
+ // current BB, but this is overwritten.
+ llvm::Value *V = Builder.CreatePtrToInt(EmitScalarExpr(S.getTarget()),
+ llvm::Type::Int32Ty,
+ "addr");
+ llvm::SwitchInst *I = Builder.CreateSwitch(V, Builder.GetInsertBlock());
+ IndirectSwitches.push_back(I);
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
+ if (Cond == -1) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed)
+ EmitStmt(Executed);
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ EmitStmt(S.getThen());
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ EmitBlock(ElseBlock);
+ EmitStmt(Else);
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
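+
+// Why the ContainsLabel check above matters: the "dead" arm may still be a
+// branch target, so it cannot simply be dropped (illustrative):
+//
+//   if (0) { dead: puts("reached via goto"); }
+//   goto dead;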
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+ // Emit the header for the loop, insert it, which will create an uncond br to
+ // it.
+ llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
+ EmitBlock(LoopHeader);
+
+ // Create an exit block for when the condition fails, create a block for the
+ // body of the loop.
+ llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // while(1) is common, avoid extra exit blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ // Emit the loop body.
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Cycle to the condition.
+ EmitBranch(LoopHeader);
+
+ // Emit the exit block.
+ EmitBlock(ExitBlock, true);
+
+ // The LoopHeader typically is just a branch if we skipped emitting
+ // a branch, try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader);
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+ // Emit the body for the loop, insert it, which will create an uncond br to
+ // it.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
+ EmitBlock(LoopBody);
+
+ llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+
+ // Emit the body of the loop into the block.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(DoCond);
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+ // Evaluate the conditional in the while header.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+
+ // Emit the exit block.
+ EmitBlock(AfterDo);
+
+ // The DoCond block typically is just a branch if we skipped
+ // emitting a branch, try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(DoCond);
+}
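+
+// The while(0) special case above targets the common macro idiom
+// (illustrative):
+//
+//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
+//
+// With a constant-zero condition no back-edge branch is emitted, and the
+// do.cond block can then be folded away by SimplifyForwardingBlocks.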
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+ // FIXME: What do we do if the increment (e.g.) contains a stmt expression,
+ // which contains a continue/break?
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ // Evaluate the condition if present. If not, treat it as a
+ // non-zero-constant according to 6.8.5.3p2, aka, true.
+ if (S.getCond()) {
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ EmitBranchOnBoolExpr(S.getCond(), ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block.
+ llvm::BasicBlock *ContinueBlock;
+ if (S.getInc())
+ ContinueBlock = createBasicBlock("for.inc");
+ else
+ ContinueBlock = CondBlock;
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+
+ // If the condition is true, execute the body of the for stmt.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(ContinueBlock);
+ EmitStmt(S.getInc());
+ }
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
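+
+// For illustration: a loop like
+//   for (i = 0; i < n; ++i) body();
+// is laid out roughly as
+//   for.cond: branch on (i < n) to for.body or for.end
+//   for.body: call body(), fall through to for.inc
+//   for.inc:  ++i, branch back to for.cond
+//   for.end:  code after the loop
+// "continue" targets for.inc when an increment is present (otherwise
+// for.cond), and "break" always targets for.end.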
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+ // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ Builder.CreateStore(EmitLValue(RV).getAddress(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ EmitAggExpr(RV, ReturnValue, false);
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
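+
+// For illustration, the GCC extension mentioned above:
+//   void g(void);
+//   void f(void) { return g(); }  // operand present, but f returns void
+// Here ReturnValue is null, so the call is emitted only for its side effects
+// before branching through any cleanups to the return block.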
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If the case statement range is not too big, add one
+/// case to the switch instruction for each value within the range. If the
+/// range is too big, emit an "if"-style range check instead.
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(llvm::ConstantInt::get(LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(), llvm::ConstantInt::get(LHS),
+ "tmp");
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(Range), "tmp");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
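+
+// For illustration, with the GNU case-range extension:
+//   case 1 ... 3:    three individual switch cases are added, all targeting
+//                    the same "sw.bb" block;
+//   case 1 ... 1000: the range exceeds the hardcoded limit, so a chained
+//                    check of the form (cond - 1) ule 999 is emitted in a
+//                    "sw.caserange" block instead.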
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(CaseVal), CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, i.e.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
+ SwitchInsn->addCase(llvm::ConstantInt::get(CaseVal), CaseDest);
+
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
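+
+// For illustration:
+//   case 1:
+//   case 2:
+//   case 3: return f();
+// The loop above adds the constants 1, 2 and 3 as switch cases that all
+// target one "sw.bb" block; only the final substatement (the return) is
+// emitted recursively.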
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+ // Create a basic block to hold the code that comes after the switch
+ // statement. We also need to create a default block now so that
+ // explicit case range tests have a place to jump to on failure.
+ llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+ // All break statements jump to NextBlock. If BreakContinueStack is
+ // non-empty then reuse the last ContinueBlock.
+ llvm::BasicBlock *ContinueBlock = 0;
+ if (!BreakContinueStack.empty())
+ ContinueBlock = BreakContinueStack.back().ContinueBlock;
+
+ // Ensure that any VLAs created between there and here are undone.
+ BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setSuccessor(0, CaseRangeBlock);
+
+ // If a default was never emitted then reroute any jumps to it and
+ // discard.
+ if (!DefaultBlock->getParent()) {
+ DefaultBlock->replaceAllUsesWith(NextBlock);
+ delete DefaultBlock;
+ }
+
+ // Emit continuation.
+ EmitBlock(NextBlock, true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
+
+static std::string
+SimplifyConstraint(const char *Constraint, TargetInfo &Target,
+ llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(*Constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+ assert(result && "Could not resolve symbolic name"); result=result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
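+
+// For illustration (assuming an x86-like target):
+//   "?r"    -> "r"    ('?' is dropped)
+//   "g"     -> "imr"  (general operand expanded)
+//   "[foo]" -> "0"    (symbolic name resolved to its operand index, here 0)
+// with anything else passed through Target.convertConstraint.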
+
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ const llvm::Type *Ty = ConvertType(InputExpr->getType());
+
+ if (Ty->isSingleValueType()) {
+ Arg = EmitScalarExpr(InputExpr);
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
+ } else {
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ Arg = Dest.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
+
+ // Get all the output and input constraints together.
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool result = Target.validateOutputConstraint(Info);
+ assert(result && "Failed to parse output constraint"); result=result;
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool result = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(),
+ Info); result=result;
+ assert(result && "Failed to parse input constraint");
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<const llvm::Type *> ResultRegTypes;
+ std::vector<const llvm::Type *> ResultTruncRegTypes;
+ std::vector<const llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<const llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() &&
+ Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputTy = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputTy) < InputSize) {
+ // Form the asm to return the value as a larger integer type.
+ ResultRegTypes.back() = llvm::IntegerType::get((unsigned)InputSize);
+ }
+ }
+
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputTy = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputTy) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg,
+ llvm::IntegerType::get(LLVMPointerWidth));
+ unsigned OutputSize = (unsigned)getContext().getTypeSize(OutputTy);
+ Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(OutputSize));
+ }
+ }
+
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ std::string Clobber(S.getClobber(i)->getStrData(),
+ S.getClobber(i)->getByteLength());
+
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber.c_str());
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine-specific clobbers.
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ const llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = llvm::Type::VoidTy;
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(ResultRegTypes);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+ // Truncate the integer result to the right size; note that
+ // ResultTruncRegTypes can be a pointer.
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get((unsigned)ResSize));
+
+ if (Tmp->getType() != TruncTy) {
+ assert(isa<llvm::PointerType>(TruncTy));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
+ ResultRegQualTys[i]);
+ }
+}
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..820e1bd6
--- /dev/null
+++ b/lib/CodeGen/CGValue.h
@@ -0,0 +1,323 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+ class ObjCPropertyRefExpr;
+ class ObjCKVCRefExpr;
+
+namespace CodeGen {
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ llvm::Value *V1, *V2;
+ // TODO: Encode this into the low bit of pointer for more efficient
+ // return-by-value.
+ enum { Scalar, Complex, Aggregate } Flavor;
+
+ bool Volatile:1;
+public:
+
+ bool isScalar() const { return Flavor == Scalar; }
+ bool isComplex() const { return Flavor == Complex; }
+ bool isAggregate() const { return Flavor == Aggregate; }
+
+ bool isVolatileQualified() const { return Volatile; }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1;
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::pair<llvm::Value *, llvm::Value *>(V1, V2);
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1;
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1 = V;
+ ER.Flavor = Scalar;
+ ER.Volatile = false;
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1 = V1;
+ ER.V2 = V2;
+ ER.Flavor = Complex;
+ ER.Volatile = false;
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ RValue ER;
+ ER.V1 = C.first;
+ ER.V2 = C.second;
+ ER.Flavor = Complex;
+ ER.Volatile = false;
+ return ER;
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Vol = false) {
+ RValue ER;
+ ER.V1 = V;
+ ER.Flavor = Aggregate;
+ ER.Volatile = Vol;
+ return ER;
+ }
+};
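+
+// Usage sketch (names below are placeholders): callers wrap raw llvm::Value*s
+// in an RValue and dispatch on its flavor, e.g.
+//   RValue Scalar = RValue::get(SomeIntValue);       // simple SSA value
+//   RValue Cplx   = RValue::getComplex(Real, Imag);  // complex pair
+//   RValue Agg    = RValue::getAggregate(AggAddr);   // address of aggregate
+//   if (Scalar.isScalar()) Use(Scalar.getScalarVal());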
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bit range.
+class LValue {
+ // FIXME: alignment?
+
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+ BitField, // This is a bitfield l-value, use getBitfield*.
+ ExtVectorElt, // This is an extended vector subset, use getExtVector*
+ PropertyRef, // This is an Objective-C property reference, use
+ // getPropertyRefExpr
+ KVCRef // This is an objective-c 'implicit' property ref,
+ // use getKVCRefExpr
+ } LVType;
+
+ enum ObjCType {
+ None = 0, // object with no gc attribute.
+ Weak, // __weak object expression
+ Strong // __strong object expression
+ };
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ struct {
+ unsigned short StartBit;
+ unsigned short Size;
+ bool IsSigned;
+ } BitfieldData;
+
+ // Obj-C property reference expression
+ const ObjCPropertyRefExpr *PropertyRefExpr;
+ // ObjC 'implicit' property reference expression
+ const ObjCKVCRefExpr *KVCRefExpr;
+ };
+
+ bool Volatile:1;
+ // FIXME: set but never used, what effect should it have?
+ bool Restrict:1;
+
+ // objective-c's ivar
+ bool Ivar:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+ // Lvalue is a global reference of an objective-c object
+ bool GlobalObjCRef : 1;
+
+ // objective-c's gc attributes
+ unsigned ObjCType : 2;
+
+
+
+private:
+ static void SetQualifiers(unsigned Qualifiers, LValue& R) {
+ R.Volatile = (Qualifiers&QualType::Volatile)!=0;
+ R.Restrict = (Qualifiers&QualType::Restrict)!=0;
+ // FIXME: Convenient place to set objc flags to 0. This should really be
+ // done in a user-defined constructor instead.
+ R.ObjCType = None;
+ R.Ivar = R.NonGC = R.GlobalObjCRef = false;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitfield() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+ bool isPropertyRef() const { return LVType == PropertyRef; }
+ bool isKVCRef() const { return LVType == KVCRef; }
+
+ bool isVolatileQualified() const { return Volatile; }
+ bool isRestrictQualified() const { return Restrict; }
+ unsigned getQualifiers() const {
+ return (Volatile ? QualType::Volatile : 0) |
+ (Restrict ? QualType::Restrict : 0);
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ bool isNonGC () const { return NonGC; }
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ bool isObjCWeak() const { return ObjCType == Weak; }
+ bool isObjCStrong() const { return ObjCType == Strong; }
+
+ static void SetObjCIvar(LValue& R, bool iValue) {
+ R.Ivar = iValue;
+ }
+
+ static void SetGlobalObjCRef(LValue& R, bool iValue) {
+ R.GlobalObjCRef = iValue;
+ }
+
+ static void SetObjCNonGC(LValue& R, bool iValue) {
+ R.NonGC = iValue;
+ }
+ static void SetObjCType(QualType::GCAttrTypes GCAttrs, LValue& R) {
+ if (GCAttrs == QualType::Weak)
+ R.ObjCType = Weak;
+ else if (GCAttrs == QualType::Strong)
+ R.ObjCType = Strong;
+ else
+ R.ObjCType = None;
+ }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+ // bitfield lvalue
+ llvm::Value *getBitfieldAddr() const { assert(isBitfield()); return V; }
+ unsigned short getBitfieldStartBit() const {
+ assert(isBitfield());
+ return BitfieldData.StartBit;
+ }
+ unsigned short getBitfieldSize() const {
+ assert(isBitfield());
+ return BitfieldData.Size;
+ }
+ bool isBitfieldSigned() const {
+ assert(isBitfield());
+ return BitfieldData.IsSigned;
+ }
+ // property ref lvalue
+ const ObjCPropertyRefExpr *getPropertyRefExpr() const {
+ assert(isPropertyRef());
+ return PropertyRefExpr;
+ }
+
+ // 'implicit' property ref lvalue
+ const ObjCKVCRefExpr *getKVCRefExpr() const {
+ assert(isKVCRef());
+ return KVCRefExpr;
+ }
+
+ static LValue MakeAddr(llvm::Value *V, unsigned Qualifiers,
+ QualType::GCAttrTypes GCAttrs = QualType::GCNone) {
+ LValue R;
+ R.LVType = Simple;
+ R.V = V;
+ SetQualifiers(Qualifiers,R);
+ SetObjCType(GCAttrs, R);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeBitfield(llvm::Value *V, unsigned short StartBit,
+ unsigned short Size, bool IsSigned,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = V;
+ R.BitfieldData.StartBit = StartBit;
+ R.BitfieldData.Size = Size;
+ R.BitfieldData.IsSigned = IsSigned;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ // FIXME: It is probably bad that we aren't emitting the target when we build
+ // the lvalue. However, this complicates the code a bit, and I haven't figured
+ // out how to make it go wrong yet.
+ static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
+ unsigned Qualifiers) {
+ LValue R;
+ R.LVType = PropertyRef;
+ R.PropertyRefExpr = E;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+
+ static LValue MakeKVCRef(const ObjCKVCRefExpr *E, unsigned Qualifiers) {
+ LValue R;
+ R.LVType = KVCRef;
+ R.KVCRefExpr = E;
+ SetQualifiers(Qualifiers,R);
+ return R;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..d6c46a8
--- /dev/null
+++ b/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,24 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangCodeGen
+ CGBuiltin.cpp
+ CGBlocks.cpp
+ CGCall.cpp
+ CGCXX.cpp
+ CGDebugInfo.cpp
+ CGDecl.cpp
+ CGExprAgg.cpp
+ CGExprComplex.cpp
+ CGExprConstant.cpp
+ CGExpr.cpp
+ CGExprScalar.cpp
+ CGObjC.cpp
+ CGObjCGNU.cpp
+ CGObjCMac.cpp
+ CGStmt.cpp
+ CodeGenFunction.cpp
+ CodeGenModule.cpp
+ CodeGenTypes.cpp
+ Mangle.cpp
+ ModuleBuilder.cpp
+ )
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..672f6da
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,714 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : BlockFunction(cgm, *this, Builder), CGM(cgm),
+ Target(CGM.getContext().Target),
+ DebugInfo(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ CXXThisDecl(0) {
+ LLVMIntTy = ConvertType(getContext().IntTy);
+ LLVMPointerWidth = Target.getPointerWidth(0);
+}
+
+ASTContext &CodeGenFunction::getContext() const {
+ return CGM.getContext();
+}
+
+
+llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
+ llvm::BasicBlock *&BB = LabelMap[S];
+ if (BB) return BB;
+
+ // Create, but don't insert, the new block.
+ return BB = createBasicBlock(S->getName());
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+}
+
+llvm::Constant *
+CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+}
+
+const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
+ // FIXME: Use positive checks instead of negative ones to be more robust in
+ // the face of extension.
+ return !T->hasPointerRepresentation() &&!T->isRealType() &&
+ !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
+ !T->isBlockPointerType();
+}
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if there are no explicit
+ // jumps to the return block.
+ if (ReturnBlock->use_empty())
+ delete ReturnBlock;
+ else
+ EmitBlock(ReturnBlock);
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
+ if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ // Reset insertion point and delete the branch.
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock;
+ return;
+ }
+ }
+
+ // FIXME: We are at an unreachable point, there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock);
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ // Finish emission of indirect switches.
+ EmitIndirectSwitches();
+
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+ assert(BlockScopes.empty() &&
+ "did not remove all blocks from block scope map!");
+ assert(CleanupEntries.empty() &&
+ "mismatched push/pop in cleanup stack!");
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitRegionEnd(CurFn, Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+}
+
+void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+ // Create a marker to make it easy to insert allocas into the entry block
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "",
+ EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = createBasicBlock("return");
+ ReturnValue = 0;
+ if (!RetTy->isVoidType())
+ ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ // Emit subprogram debug descriptor.
+ // FIXME: The cast here is a huge hack.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(StartLoc);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ DI->EmitFunctionStart(CGM.getMangledName(FD), RetTy, CurFn, Builder);
+ } else {
+ // Just use LLVM function name.
+ DI->EmitFunctionStart(Fn->getName().c_str(),
+ RetTy, CurFn, Builder);
+ }
+ }
+
+ // FIXME: Leaked.
+ CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args);
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = i->second;
+
+ if (Ty->isVariablyModifiedType())
+ EmitVLASize(Ty);
+ }
+}
+
+void CodeGenFunction::GenerateCode(const FunctionDecl *FD,
+ llvm::Function *Fn) {
+ // Check if we should generate debug info for this function.
+ if (CGM.getDebugInfo() && !FD->hasAttr<NodebugAttr>())
+ DebugInfo = CGM.getDebugInfo();
+
+ FunctionArgList Args;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isInstance()) {
+ // Create the implicit 'this' decl.
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+ &getContext().Idents.get("this"),
+ MD->getThisType(getContext()));
+ Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+ }
+ }
+
+ if (FD->getNumParams()) {
+ const FunctionProtoType* FProto = FD->getType()->getAsFunctionProtoType();
+ assert(FProto && "Function def must have prototype!");
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(std::make_pair(FD->getParamDecl(i),
+ FProto->getArgType(i)));
+ }
+
+ // FIXME: Support CXXTryStmt here, too.
+ if (const CompoundStmt *S = FD->getCompoundBody(getContext())) {
+ StartFunction(FD, FD->getResultType(), Fn, Args, S->getLBracLoc());
+ EmitStmt(S);
+ FinishFunction(S->getRBracLoc());
+ }
+
+ // Destroy the 'this' declaration.
+ if (CXXThisDecl)
+ CXXThisDecl->Destroy(getContext());
+}
+
+/// ContainsLabel - Return true if the statement contains a label. If this
+/// statement is not executed normally and does not contain a label, we can
+/// just remove its code.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I != E; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
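+
+// For illustration: in
+//   if (0) { foo: bar(); }
+//   goto foo;
+// the "if (0)" body is never executed normally, but because it contains the
+// label "foo" it is still a branch target, so its code cannot be dropped.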
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return 0. If it
+/// constant folds to 'true' and does not contain a label, return 1; if it
+/// constant folds to 'false' and does not contain a label, return -1.
+int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ Expr::EvalResult Result;
+ if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+ Result.HasSideEffects)
+ return 0; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return 0; // Contains a label.
+
+ return Result.Val.getInt().getBoolValue() ? 1 : -1;
+}
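+
+// For illustration:
+//   "2 + 3"            -> 1  (folds to a nonzero constant)
+//   "5 - 5"            -> -1 (folds to zero)
+//   a plain variable x -> 0  (not foldable)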
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
+ return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UnaryOperator::LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // Handle ?: operator.
+
+ // Just ignore GNU ?: extension.
+ if (CondOp->getLHS()) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ return;
+ }
+ }
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
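+
+// For illustration: for
+//   if (a && b) t(); else f();
+// the LHS is emitted first, branching to "land.lhs.true" when a is true and
+// straight to the false destination when a is false; the RHS is then emitted
+// in "land.lhs.true", branching to the true or false destination. "||" is
+// handled symmetrically through "lor.lhs.false", and "!x" simply swaps the
+// two destinations.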
+
+/// getCGRecordLayout - Return record layout info.
+const CGRecordLayout *CodeGenFunction::getCGRecordLayout(CodeGenTypes &CGT,
+ QualType Ty) {
+ const RecordType *RTy = Ty->getAsRecordType();
+ assert (RTy && "Unexpected type. RecordType expected here.");
+
+ return CGT.getCGRecordLayout(RTy->getDecl());
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
+ // Use LabelIDs.size() as the new ID if one hasn't been assigned.
+ return LabelIDs.insert(std::make_pair(L, LabelIDs.size())).first->second;
+}
+
+void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
+ const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+
+ // Get size and alignment info for this aggregate.
+ std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+ // Don't bother emitting a zero-byte memset.
+ if (TypeInfo.first == 0)
+ return;
+
+ // FIXME: Handle variable sized types.
+ const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
+
+ Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
+ llvm::ConstantInt::getNullValue(llvm::Type::Int8Ty),
+ // TypeInfo.first describes size in bits.
+ llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ TypeInfo.second/8));
+}
+
+void CodeGenFunction::EmitIndirectSwitches() {
+ llvm::BasicBlock *Default;
+
+ if (IndirectSwitches.empty())
+ return;
+
+ if (!LabelIDs.empty()) {
+ Default = getBasicBlockForLabel(LabelIDs.begin()->first);
+ } else {
+ // No possible targets for indirect goto, just emit an infinite
+ // loop.
+ Default = createBasicBlock("indirectgoto.loop", CurFn);
+ llvm::BranchInst::Create(Default, Default);
+ }
+
+ for (std::vector<llvm::SwitchInst*>::iterator i = IndirectSwitches.begin(),
+ e = IndirectSwitches.end(); i != e; ++i) {
+ llvm::SwitchInst *I = *i;
+
+ I->setSuccessor(0, Default);
+ for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
+ LE = LabelIDs.end(); LI != LE; ++LI) {
+ I->addCase(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ LI->second),
+ getBasicBlockForLabel(LI->first));
+ }
+ }
+}
+
+llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT)
+{
+ llvm::Value *&SizeEntry = VLASizeMap[VAT];
+
+ assert(SizeEntry && "Did not emit size for type");
+ return SizeEntry;
+}
+
+llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty)
+{
+ assert(Ty->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+ llvm::Value *&SizeEntry = VLASizeMap[VAT];
+
+ if (!SizeEntry) {
+ // Get the element size.
+ llvm::Value *ElemSize;
+
+ QualType ElemTy = VAT->getElementType();
+
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ if (ElemTy->isVariableArrayType())
+ ElemSize = EmitVLASize(ElemTy);
+ else {
+ ElemSize = llvm::ConstantInt::get(SizeTy,
+ getContext().getTypeSize(ElemTy) / 8);
+ }
+
+ llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
+ NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+
+ SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+ }
+
+ return SizeEntry;
+ } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ EmitVLASize(AT->getElementType());
+ } else if (const PointerType *PT = Ty->getAsPointerType())
+ EmitVLASize(PT->getPointeeType());
+ else {
+ assert(0 && "unknown VM type!");
+ }
+
+ return 0;
+}
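+
+// For illustration: given
+//   void f(int n, int m) { int a[n][m]; }
+// EmitVLASize first computes the size of the element type int[m] (recursing
+// into this same function), multiplies it by the zero-extended value of n,
+// and caches the result in VLASizeMap so later queries reuse it through
+// GetVLASize.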
+
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
+ return EmitScalarExpr(E);
+ }
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock)
+{
+ CleanupEntries.push_back(CleanupEntry(CleanupBlock));
+}
+
+void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize)
+{
+ assert(CleanupEntries.size() >= OldCleanupStackSize &&
+ "Cleanup stack mismatch!");
+
+ while (CleanupEntries.size() > OldCleanupStackSize)
+ EmitCleanupBlock();
+}
+
+CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
+{
+ CleanupEntry &CE = CleanupEntries.back();
+
+ llvm::BasicBlock *CleanupBlock = CE.CleanupBlock;
+
+ std::vector<llvm::BasicBlock *> Blocks;
+ std::swap(Blocks, CE.Blocks);
+
+ std::vector<llvm::BranchInst *> BranchFixups;
+ std::swap(BranchFixups, CE.BranchFixups);
+
+ CleanupEntries.pop_back();
+
+ // Check if any branch fixups pointed to the scope we just popped. If so,
+ // we can remove them.
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+
+ if (I == BlockScopes.end())
+ continue;
+
+ assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+
+ if (I->second == CleanupEntries.size()) {
+ // We don't need to do this branch fixup.
+ BranchFixups[i] = BranchFixups.back();
+ BranchFixups.pop_back();
+ i--;
+ e--;
+ continue;
+ }
+ }
+
+ llvm::BasicBlock *SwitchBlock = 0;
+ llvm::BasicBlock *EndBlock = 0;
+ if (!BranchFixups.empty()) {
+ SwitchBlock = createBasicBlock("cleanup.switch");
+ EndBlock = createBasicBlock("cleanup.end");
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ Builder.SetInsertPoint(SwitchBlock);
+
+ llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::Int32Ty,
+ "cleanup.dst");
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
+ BranchFixups.size());
+
+ // Restore the current basic block (if any)
+ if (CurBB) {
+ Builder.SetInsertPoint(CurBB);
+
+ // If we had a current basic block, we also need to emit an instruction
+ // to initialize the cleanup destination.
+ Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::Int32Ty),
+ DestCodePtr);
+ } else
+ Builder.ClearInsertionPoint();
+
+ for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+ llvm::BranchInst *BI = BranchFixups[i];
+ llvm::BasicBlock *Dest = BI->getSuccessor(0);
+
+ // Fixup the branch instruction to point to the cleanup block.
+ BI->setSuccessor(0, CleanupBlock);
+
+ if (CleanupEntries.empty()) {
+ llvm::ConstantInt *ID;
+
+ // Check if we already have a destination for this block.
+ if (Dest == SI->getDefaultDest())
+ ID = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ else {
+ ID = SI->findCaseDest(Dest);
+ if (!ID) {
+ // No code found, get a new unique one by using the number of
+ // switch successors.
+ ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ SI->getNumSuccessors());
+ SI->addCase(ID, Dest);
+ }
+ }
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+ } else {
+ // We need to jump through another cleanup block. Create a pad block
+ // with a branch instruction that jumps to the final destination and
+ // add it as a branch fixup to the current cleanup scope.
+
+ // Create the pad block.
+ llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+
+ // Create a unique case ID.
+ llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ SI->getNumSuccessors());
+
+ // Store the jump destination before the branch instruction.
+ new llvm::StoreInst(ID, DestCodePtr, BI);
+
+ // Add it as the destination.
+ SI->addCase(ID, CleanupPad);
+
+ // Create the branch to the final destination.
+ llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
+ CleanupPad->getInstList().push_back(BI);
+
+ // And add it as a branch fixup.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+ }
+ }
+ }
+
+ // Remove all blocks from the block scope map.
+ for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
+ assert(BlockScopes.count(Blocks[i]) &&
+ "Did not find block in scope map!");
+
+ BlockScopes.erase(Blocks[i]);
+ }
+
+ return CleanupBlockInfo(CleanupBlock, SwitchBlock, EndBlock);
+}
+
+void CodeGenFunction::EmitCleanupBlock()
+{
+ CleanupBlockInfo Info = PopCleanupBlock();
+
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ if (CurBB && !CurBB->getTerminator() &&
+ Info.CleanupBlock->getNumUses() == 0) {
+ CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
+ delete Info.CleanupBlock;
+ } else
+ EmitBlock(Info.CleanupBlock);
+
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+}
+
+void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI)
+{
+ assert(!CleanupEntries.empty() &&
+ "Trying to add branch fixup without cleanup block!");
+
+ // FIXME: We could be more clever here and check if there's already a branch
+ // fixup for this destination and recycle it.
+ CleanupEntries.back().BranchFixups.push_back(BI);
+}
+
+void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest)
+{
+ if (!HaveInsertPoint())
+ return;
+
+ llvm::BranchInst* BI = Builder.CreateBr(Dest);
+
+ Builder.ClearInsertionPoint();
+
+ // The stack is empty, no need to do any cleanup.
+ if (CleanupEntries.empty())
+ return;
+
+ if (!Dest->getParent()) {
+ // We are trying to branch to a block that hasn't been inserted yet.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ BlockScopeMap::iterator I = BlockScopes.find(Dest);
+ if (I == BlockScopes.end()) {
+ // We are trying to jump to a block that is outside of any cleanup scope.
+ AddBranchFixup(BI);
+ return;
+ }
+
+ assert(I->second < CleanupEntries.size() &&
+ "Trying to branch into cleanup region");
+
+ if (I->second == CleanupEntries.size() - 1) {
+ // We have a branch to a block in the same scope.
+ return;
+ }
+
+ AddBranchFixup(BI);
+}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..b7894a4
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,900 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include <map>
+#include "CGBlocks.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Module;
+ class SwitchInst;
+ class Value;
+}
+
+namespace clang {
+ class ASTContext;
+ class CXXDestructorDecl;
+ class Decl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenTypes;
+ class CGDebugInfo;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public BlockFunction {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+public:
+ CodeGenModule &CGM; // Per-module state.
+ TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// ReturnBlock - Unified return block.
+ llvm::BasicBlock *ReturnBlock;
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Instruction *ReturnValue;
+
+ /// AllocaInsertPt - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ const llvm::Type *LLVMIntTy;
+ uint32_t LLVMPointerWidth;
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
+ /// passed in block as the cleanup block.
+ void PushCleanupBlock(llvm::BasicBlock *CleanupBlock);
+
+ /// CleanupBlockInfo - A struct representing a popped cleanup block.
+ struct CleanupBlockInfo {
+ /// CleanupBlock - the cleanup block
+ llvm::BasicBlock *CleanupBlock;
+
+ /// SwitchBlock - the block (if any) containing the switch instruction used
+ /// for jumping to the final destination.
+ llvm::BasicBlock *SwitchBlock;
+
+ /// EndBlock - the default destination for the switch instruction.
+ llvm::BasicBlock *EndBlock;
+
+ CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
+ llvm::BasicBlock *eb)
+ : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb) {}
+ };
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
+ /// branch fixups and return a block info struct with the switch block and end
+ /// block.
+ CleanupBlockInfo PopCleanupBlock();
+
+ /// CleanupScope - RAII object that will create a cleanup block and set the
+ /// insert point to that block. When destroyed, it pushes the cleanup block
+ /// onto the cleanup stack and restores the insert point to the previous
+ /// block.
+ class CleanupScope {
+ CodeGenFunction& CGF;
+ llvm::BasicBlock *CurBB;
+ llvm::BasicBlock *CleanupBB;
+
+ public:
+ CleanupScope(CodeGenFunction &cgf)
+ : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()) {
+ CleanupBB = CGF.createBasicBlock("cleanup");
+ CGF.Builder.SetInsertPoint(CleanupBB);
+ }
+
+ ~CleanupScope() {
+ CGF.PushCleanupBlock(CleanupBB);
+ CGF.Builder.SetInsertPoint(CurBB);
+ }
+ };
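+
+ // For illustration only (a minimal sketch, not taken from this file): a
+ // caller that wants cleanup code to run on every exit path might write
+ //
+ //   {
+ //     CleanupScope Scope(*this);  // starts emitting into a fresh "cleanup" block
+ //     EmitSomeCleanupCode();      // hypothetical helper; emits the cleanup IR
+ //   }                             // dtor pushes the block via PushCleanupBlock
+ //                                 // and restores the previous insert point
+ //
+ // The pushed entry is later consumed by PopCleanupBlock/EmitCleanupBlocks.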
+
+ /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
+ /// blocks that have been added.
+ void EmitCleanupBlocks(size_t OldCleanupStackSize);
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert block
+ /// through the cleanup handling code (if any) and then on to \arg Dest.
+ ///
+ /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
+ /// this behavior for branches?
+ void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+
+private:
+ CGDebugInfo* DebugInfo;
+
+ /// LabelIDs - Track arbitrary ids assigned to labels for use in implementing
+ /// the GCC address-of-label extension and indirect goto. IDs are assigned to
+ /// labels inside GetIDForAddrOfLabel().
+ std::map<const LabelStmt*, unsigned> LabelIDs;
+
+ /// IndirectSwitches - Record the list of switches for indirect
+ /// gotos. Emission of the actual switching code needs to be delayed until all
+ /// AddrLabelExprs have been seen.
+ std::vector<llvm::SwitchInst*> IndirectSwitches;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
+ : BreakBlock(bb), ContinueBlock(cb) {}
+
+ llvm::BasicBlock *BreakBlock;
+ llvm::BasicBlock *ContinueBlock;
+ };
+ llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ /// SwitchInsn - This is the nearest current switch instruction. It is null
+ /// if the current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+ /// CaseRangeBlock - This block holds the condition check for the last case
+ /// statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// InvokeDest - This is the nearest exception target for calls
+ /// which can unwind, when exceptions are being used.
+ llvm::BasicBlock *InvokeDest;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const VariableArrayType*, llvm::Value*> VLASizeMap;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ struct CleanupEntry {
+ /// CleanupBlock - The block of code that does the actual cleanup.
+ llvm::BasicBlock *CleanupBlock;
+
+ /// Blocks - Basic blocks that were emitted in the current cleanup scope.
+ std::vector<llvm::BasicBlock *> Blocks;
+
+ /// BranchFixups - Branch instructions to basic blocks that haven't been
+ /// inserted into the current function yet.
+ std::vector<llvm::BranchInst *> BranchFixups;
+
+ explicit CleanupEntry(llvm::BasicBlock *cb)
+ : CleanupBlock(cb) {}
+ };
+
+ /// CleanupEntries - Stack of cleanup entries.
+ llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
+
+ typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
+
+ /// BlockScopes - Map recording which cleanup scope each basic block belongs to.
+ BlockScopeMap BlockScopes;
+
+ /// CXXThisDecl - When generating code for a C++ member function, this will
+ /// hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXThisDecl;
+
+ llvm::SmallVector<const CXXTemporary*, 4> LiveTemporaries;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+
+ ASTContext &getContext() const;
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+
+ llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
+ void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Constant *BuildDescriptorBlockDecl(bool BlockHasCopyDispose,
+ uint64_t Size,
+ const llvm::StructType *,
+ std::vector<HelperInfo> *);
+
+ llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+ const BlockInfo& Info,
+ const Decl *OuterFuncDecl,
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+ uint64_t &Size, uint64_t &Align,
+ llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+ bool &subBlockHasCopyDispose);
+
+ void BlockForwardSelf();
+ llvm::Value *LoadBlockStruct();
+
+ llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E);
+
+ const llvm::Type *BuildByRefType(QualType Ty, uint64_t Align);
+
+ void GenerateCode(const FunctionDecl *FD,
+ llvm::Function *Fn);
+ void StartFunction(const Decl *D, QualType RetTy,
+ llvm::Function *Fn,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertType(QualType T);
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return the type of the object that 'self' represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(const char *Name="",
+ llvm::Function *Parent=0,
+ llvm::BasicBlock *InsertBefore=0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create("", Parent, InsertBefore);
+#else
+ return llvm::BasicBlock::Create(Name, Parent, InsertBefore);
+#endif
+ }
+
+ /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
+ /// label maps to.
+ llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a
+ /// branch to another basic block, simplify it. This assumes that no
+ /// other code could potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+ /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block.
+ llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ const char *Name = "tmp");
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the AggLoc/isAggLocVolatile arguments indicate where
+ /// the result should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false, bool IgnoreResult = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+ /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false);
+
+ /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false);
+
+ void EmitAggregateClear(llvm::Value *DestPtr, QualType Ty);
+
+ /// StartBlock - Start a new block named N. If the current insert block is a
+ /// dummy block, reuse it.
+ void StartBlock(const char *N);
+
+ /// getCGRecordLayout - Return record layout info.
+ const CGRecordLayout *getCGRecordLayout(CodeGenTypes &CGT, QualType RTy);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD);
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD);
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ unsigned GetIDForAddrOfLabel(const LabelStmt *L);
+
+ /// EmitMemSetToZero - Generate code to memset a value of the given type to 0.
+ void EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+ // EmitVLASize - Generate code for any VLA size expressions that might occur
+ // in a variably modified type. If Ty is a VLA, will return the value that
+ // corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+ llvm::Value *EmitVLASize(QualType Ty);
+
+ // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
+ // of a variable length array type.
+ llvm::Value *GetVLASize(const VariableArrayType *);
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+ /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis();
+
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::Value *This);
+
+ void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ void EmitDecl(const Decl &D);
+ void EmitBlockVarDecl(const VarDecl &D);
+ void EmitLocalBlockVarDecl(const VarDecl &D);
+ void EmitStaticBlockVarDecl(const VarDecl &D);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ llvm::Value *AggLoc = 0, bool isAggVol = false);
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ QualType Ty);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation to the
+ /// memory representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, QualType Ty);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
+ RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
+ RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
+
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+ /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
+ QualType Ty);
+ void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
+ void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+
+ /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
+ /// constraints as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+ llvm::Value **Result=0);
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedFunctionName(unsigned Type);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperator(const ConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForField(llvm::Value* Base, FieldDecl* Field,
+ bool isUnion, unsigned CVRQualifiers);
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
+
+ LValue EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E);
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
+ LValue EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E);
+ LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a
+ /// direct call; used to set attributes on the call (noreturn,
+ /// etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0);
+
+ RValue EmitCall(llvm::Value *Callee, QualType FnType,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E);
+
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD);
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitShuffleVector(llvm::Value* V1, llvm::Value *V2, ...);
+ llvm::Value *EmitVector(llvm::Value * const *Vals, unsigned NumVals,
+ bool isSplat = false);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E);
+ RValue EmitObjCPropertyGet(const Expr *E);
+ RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S);
+ void EmitObjCPropertySet(const Expr *E, RValue Src);
+ void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
+
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+ RValue EmitReferenceBindingToExpr(const Expr* E, QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+ llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign=false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+
+ /// EmitAggExpr - Emit the computation of the specified expression of
+ /// aggregate type. The result is computed into DestPtr. Note that if
+ /// DestPtr is null, the value of the aggregate expression is not needed.
+ void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
+ bool IgnoreResult = false);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
+ bool IgnoreImag = false,
+ bool IgnoreRealAssign = false,
+ bool IgnoreImagAssign = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+ /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global
+ /// for a static block var decl.
+ llvm::GlobalVariable * CreateStaticBlockVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes
+ Linkage);
+
+ /// GenerateStaticCXXBlockVarDeclInit - Create the initializer for a C++
+ /// runtime initialized static block var decl.
+ void GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+ void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+
+ RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+ llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// ContainsLabel - Return true if the statement contains a label in it. If
+ /// the statement is never reached through normal control flow and contains no
+ /// label, its code can simply be removed.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return 0. If it
+ /// constant folds to 'true' and does not contain a label, return 1; if it
+ /// constant folds to 'false' and does not contain a label, return -1.
+ int ConstantFoldsToSimpleInteger(const Expr *Cond);
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
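+
+ // Worked example (illustrative, not from the original source): for
+ //   if (0) { foo(); }
+ // the condition constant-folds to false, so ConstantFoldsToSimpleInteger
+ // returns -1; provided ContainsLabel is false for the then-body, codegen can
+ // skip emitting that body and the conditional branch altogether.
+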
+private:
+
+ /// EmitIndirectSwitches - Emit code for all of the switch
+ /// instructions in IndirectSwitches.
+ void EmitIndirectSwitches();
+
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ llvm::SmallVector<llvm::Value*, 16> &Args);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ /// EmitCleanupBlock - emits a single cleanup block.
+ void EmitCleanupBlock();
+
+ /// AddBranchFixup - adds a branch instruction to the list of fixups for the
+ /// current cleanup scope.
+ void AddBranchFixup(llvm::BranchInst *BI);
+
+ /// EmitCallArg - Emit a single call argument.
+ RValue EmitCallArg(const Expr *E, QualType ArgType);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ QualType ArgType = *I;
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+ ArgType));
+ }
+ }
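+
+ // Illustrative use (a sketch; FPT and TheCall are placeholder names): when
+ // the callee's type is a FunctionProtoType FPT, the prototype supplies the
+ // known parameter types and any trailing variadic arguments fall back to
+ // their own expression types:
+ //
+ //   CallArgList Args;
+ //   EmitCallArgs(Args, FPT, TheCall->arg_begin(), TheCall->arg_end());
+ //
+ // Passing a null CallArgTypeInfo emits every argument using the argument
+ // expression's own type, as for a call through an unprototyped function.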
+};
+
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..b69301e
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,1543 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "Mangle.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CompileOptions &compileOpts,
+ llvm::Module &M, const llvm::TargetData &TD,
+ Diagnostic &diags)
+ : BlockModule(C, M, TD, Types, *this), Context(C),
+ Features(C.getLangOptions()), CompileOpts(compileOpts), TheModule(M),
+ TheTargetData(TD), Diags(diags), Types(C, M, TD), Runtime(0),
+ MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0) {
+
+ if (!Features.ObjC1)
+ Runtime = 0;
+ else if (!Features.NeXTRuntime)
+ Runtime = CreateGNUObjCRuntime(*this);
+ else if (Features.ObjCNonFragileABI)
+ Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+ else
+ Runtime = CreateMacObjCRuntime(*this);
+
+ // If debug info generation is enabled, create the CGDebugInfo object.
+ DebugInfo = CompileOpts.DebugInfo ? new CGDebugInfo(this) : 0;
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete Runtime;
+ delete DebugInfo;
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ if (Runtime)
+ if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitAnnotations();
+ EmitLLVMUsed();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
+
+LangOptions::VisibilityMode
+CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (VD->getStorageClass() == VarDecl::PrivateExtern)
+ return LangOptions::Hidden;
+
+ if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
+ switch (attr->getVisibility()) {
+ default: assert(0 && "Unknown visibility!");
+ case VisibilityAttr::DefaultVisibility:
+ return LangOptions::Default;
+ case VisibilityAttr::HiddenVisibility:
+ return LangOptions::Hidden;
+ case VisibilityAttr::ProtectedVisibility:
+ return LangOptions::Protected;
+ }
+ }
+
+ return getLangOptions().getVisibilityMode();
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const Decl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ switch (getDeclVisibilityMode(D)) {
+ default: assert(0 && "Unknown visibility!");
+ case LangOptions::Default:
+ return GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ case LangOptions::Hidden:
+ return GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ case LangOptions::Protected:
+ return GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ }
+}
+
+const char *CodeGenModule::getMangledName(const GlobalDecl &GD) {
+ const NamedDecl *ND = GD.getDecl();
+
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ return getMangledCXXCtorName(D, GD.getCtorType());
+ if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ return getMangledCXXDtorName(D, GD.getDtorType());
+
+ return getMangledName(ND);
+}
+
+/// \brief Retrieves the mangled name for the given declaration.
+///
+/// If the given declaration requires a mangled name, returns an
+/// const char* containing the mangled name. Otherwise, returns
+/// the unmangled name.
+///
+const char *CodeGenModule::getMangledName(const NamedDecl *ND) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getLangOptions().CPlusPlus && !ND->hasAttrs()) {
+ assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+ return ND->getNameAsCString();
+ }
+
+ llvm::SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ if (!mangleName(ND, Context, Out)) {
+ assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+ return ND->getNameAsCString();
+ }
+
+ Name += '\0';
+ return UniqueMangledName(Name.begin(), Name.end());
+}
+
+const char *CodeGenModule::UniqueMangledName(const char *NameStart,
+ const char *NameEnd) {
+ assert(*(NameEnd - 1) == '\0' && "Mangled name must be null terminated!");
+
+ return MangledNames.GetOrCreateValue(NameStart, NameEnd).getKeyData();
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy =
+ llvm::FunctionType::get(llvm::Type::VoidTy,
+ std::vector<const llvm::Type*>(),
+ false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType* CtorStructTy =
+ llvm::StructType::get(llvm::Type::Int32Ty,
+ llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the constructor and destructor arrays.
+ std::vector<llvm::Constant*> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ std::vector<llvm::Constant*> S;
+ S.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, I->second, false));
+ S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName,
+ &TheModule);
+ }
+}
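+
+// For reference, the array emitted above has the following shape in LLVM IR
+// (illustrative; the function name and priority are made up):
+//
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//     [{ i32, void ()* } { i32 65535, void ()* @some_ctor }]
+//
+// The i32 field is the priority passed to AddGlobalCtor/AddGlobalDtor.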
+
+void CodeGenModule::EmitAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array =
+ llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
+ Annotations.size()),
+ Annotations);
+ llvm::GlobalValue *gv =
+ new llvm::GlobalVariable(Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations", &TheModule);
+ gv->setSection("llvm.metadata");
+}
+
+static CodeGenModule::GVALinkage
+GetLinkageForFunction(const FunctionDecl *FD, const LangOptions &Features) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ // C++ member functions defined inside the class are always inline.
+ if (MD->isInline() || !MD->isOutOfLineDefinition())
+ return CodeGenModule::GVA_CXXInline;
+
+ return CodeGenModule::GVA_StrongExternal;
+ }
+
+ // "static" functions get internal linkage.
+ if (FD->getStorageClass() == FunctionDecl::Static)
+ return CodeGenModule::GVA_Internal;
+
+ if (!FD->isInline())
+ return CodeGenModule::GVA_StrongExternal;
+
+ // If the inline function explicitly has the GNU inline attribute on it, or if
+ // this is C89 mode, we use the GNU semantics.
+ if (!Features.C99 && !Features.CPlusPlus) {
+ // extern inline in GNU mode is like C99 inline.
+ if (FD->getStorageClass() == FunctionDecl::Extern)
+ return CodeGenModule::GVA_C99Inline;
+ // Normal inline is a strong symbol.
+ return CodeGenModule::GVA_StrongExternal;
+ } else if (FD->hasActiveGNUInlineAttribute()) {
+ // GCC in C99 mode seems to use a different decision-making
+ // process for extern inline, which factors in previous
+ // declarations.
+ if (FD->isExternGNUInline())
+ return CodeGenModule::GVA_C99Inline;
+ // Normal inline is a strong symbol.
+ return CodeGenModule::GVA_StrongExternal;
+ }
+
+ // The definition of inline changes based on the language. Note that we
+ // have already handled "static inline" above, with the GVA_Internal case.
+ if (Features.CPlusPlus) // inline and extern inline.
+ return CodeGenModule::GVA_CXXInline;
+
+ assert(Features.C99 && "Must be in C99 mode if not in C89 or C++ mode");
+ if (FD->isC99InlineDefinition())
+ return CodeGenModule::GVA_C99Inline;
+
+ return CodeGenModule::GVA_StrongExternal;
+}
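+
+// A few illustrative inputs and the linkage class chosen above (assuming the
+// corresponding language mode; not an exhaustive list):
+//
+//   static int f(void) { ... }          -> GVA_Internal
+//   inline int g(void) { ... }  (C99)   -> GVA_C99Inline
+//   inline int h() { ... }      (C++)   -> GVA_CXXInline
+//   int k(void) { ... }                 -> GVA_StrongExternal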
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ GVALinkage Linkage = GetLinkageForFunction(D, Features);
+
+ if (Linkage == GVA_Internal) {
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ } else if (D->hasAttr<DLLExportAttr>()) {
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ } else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>()) {
+ GV->setLinkage(llvm::Function::WeakAnyLinkage);
+ } else if (Linkage == GVA_C99Inline) {
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ GV->setLinkage(llvm::Function::AvailableExternallyLinkage);
+ } else if (Linkage == GVA_CXXInline) {
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ GV->setLinkage(llvm::Function::LinkOnceODRLinkage);
+ } else {
+ assert(Linkage == GVA_StrongExternal);
+ // Otherwise, we have strong external linkage.
+ GV->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList);
+
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<FastCallAttr>())
+ F->setCallingConv(llvm::CallingConv::X86_FastCall);
+
+ if (D->hasAttr<StdCallAttr>())
+ F->setCallingConv(llvm::CallingConv::X86_StdCall);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<AlwaysInlineAttr>())
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (D->hasAttr<NoinlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ setGlobalVisibility(GV, D);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(const FunctionDecl *FD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(FD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() || FD->hasAttr<WeakImportAttr>()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, LLVMUsed.size());
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ std::vector<llvm::Constant*> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]), i8PTy);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used", &getModule());
+
+ GV->setSection("llvm.metadata");
+}
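+
+// The resulting global looks roughly like this in LLVM IR (illustrative
+// names):
+//
+//   @llvm.used = appending global [2 x i8*]
+//     [i8* bitcast (void ()* @f to i8*), i8* bitcast (i32* @g to i8*)],
+//     section "llvm.metadata"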
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+ while (!DeferredDeclsToEmit.empty()) {
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+ // The mangled name for the decl must have been emitted in GlobalDeclMap.
+ // Look it up to see if it was defined with a stronger definition (e.g. an
+ // extern inline function with a strong function redefinition). If so,
+ // just ignore the deferred decl.
+ llvm::GlobalValue *CGRef = GlobalDeclMap[getMangledName(D)];
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
+
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// annotation information for a given GlobalValue. The annotation struct is
+/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+/// GlobalValue being annotated. The second field is the constant string
+/// created from the AnnotateAttr's annotation. The third field is a constant
+/// string containing the name of the translation unit. The fourth field is
+/// the line number in the file of the annotated value declaration.
+///
+/// FIXME: this does not unique the annotation string constants, as llvm-gcc
+/// appears to.
+///
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ unsigned LineNo) {
+ llvm::Module *M = &getModule();
+
+ // get [N x i8] constants for the annotation string, and the filename string
+ // which are the 2nd and 3rd elements of the global annotation structure.
+ const llvm::Type *SBP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ llvm::Constant *anno = llvm::ConstantArray::get(AA->getAnnotation(), true);
+ llvm::Constant *unit = llvm::ConstantArray::get(M->getModuleIdentifier(),
+ true);
+
+ // Get the two global values corresponding to the ConstantArrays we just
+ // created to hold the bytes of the strings.
+ const char *StringPrefix = getContext().Target.getStringSymbolPrefix(true);
+ llvm::GlobalValue *annoGV =
+ new llvm::GlobalVariable(anno->getType(), false,
+ llvm::GlobalValue::InternalLinkage, anno,
+ GV->getName() + StringPrefix, M);
+ // translation unit name string, emitted into the llvm.metadata section.
+ llvm::GlobalValue *unitGV =
+ new llvm::GlobalVariable(unit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, unit,
+ StringPrefix, M);
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, SBP),
+ llvm::ConstantExpr::getBitCast(annoGV, SBP),
+ llvm::ConstantExpr::getBitCast(unitGV, SBP),
+ llvm::ConstantInt::get(llvm::Type::Int32Ty, LineNo)
+ };
+ return llvm::ConstantStruct::get(Fields, 4, false);
+}
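+
+// One resulting annotation entry has roughly this shape (illustrative names
+// and line number):
+//
+//   { i8* bitcast (i32* @var to i8*),           ; annotated GlobalValue
+//     i8* bitcast ([N x i8]* @var.str to i8*),  ; annotation string
+//     i8* bitcast ([M x i8]* @.str to i8*),     ; translation unit name
+//     i32 42 }                                  ; line number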
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified or the decl has
+ // attribute used.
+ if (Features.EmitAllDecls || Global->hasAttr<UsedAttr>())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Constructors and destructors should never be deferred.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return false;
+
+ GVALinkage Linkage = GetLinkageForFunction(FD, Features);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+ Linkage == GVA_CXXInline)
+ return true;
+ return false;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Invalid decl");
+
+ return VD->getStorageClass() == VarDecl::Static;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = GD.getDecl();
+
+ // If this is an alias definition (which otherwise looks like a declaration)
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(Global);
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->isThisDeclarationADefinition())
+ return;
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ // In C++, if this is marked "extern", defer code generation.
+ if (getLangOptions().CPlusPlus && !VD->getInit() &&
+ (VD->getStorageClass() == VarDecl::Extern ||
+ VD->isExternC(getContext())))
+ return;
+
+ // In C, if this isn't a definition, defer code generation.
+ if (!getLangOptions().CPlusPlus && !VD->getInit())
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function etc. These we only want to emit if they are used.
+ if (MayDeferGeneration(Global)) {
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ const char *MangledName = getMangledName(GD);
+ if (GlobalDeclMap.count(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+ return;
+ }
+
+ // Otherwise emit the definition.
+ EmitGlobalDefinition(GD);
+}
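+
+// Illustrative flow of the deferral machinery (hypothetical translation unit):
+//
+//   static int helper(void) { return 1; }  // EmitGlobal defers: GVA_Internal,
+//                                          // not yet referenced -> DeferredDecls
+//   int main(void) { return helper(); }    // emitting main references helper;
+//                                          // GetOrCreateLLVMFunction moves it
+//                                          // to DeferredDeclsToEmit
+//
+// Release() then calls EmitDeferred(), which emits the body of helper.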
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = GD.getDecl();
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ EmitCXXConstructor(CD, GD.getCtorType());
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ EmitCXXDestructor(DD, GD.getDtorType());
+ else if (isa<FunctionDecl>(D))
+ EmitGlobalFunctionDefinition(GD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ EmitGlobalVarDefinition(VD);
+ else {
+ assert(0 && "Invalid argument to EmitGlobalDefinition()");
+ }
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+ if (Entry) {
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(Entry, PTy);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
+ // If this is the first reference to a C++ inline function in a class, queue up
+ // the deferred function body for emission. These are not seen as
+ // top-level declarations.
+ if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
+ DeferredDeclsToEmit.push_back(D);
+ }
+
+ // If this function doesn't have a complete type (for example, the return
+ // type is an incomplete struct), use a fake type instead, and make sure
+ // not to try to set attributes.
+ bool IsIncompleteFunction = false;
+ if (!isa<llvm::FunctionType>(Ty)) {
+ Ty = llvm::FunctionType::get(llvm::Type::VoidTy,
+ std::vector<const llvm::Type*>(), false);
+ IsIncompleteFunction = true;
+ }
+ llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
+ llvm::Function::ExternalLinkage,
+ "", &getModule());
+ F->setName(MangledName);
+ if (D.getDecl())
+ SetFunctionAttributes(cast<FunctionDecl>(D.getDecl()), F,
+ IsIncompleteFunction);
+ Entry = F;
+ return F;
+}
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(GD.getDecl()->getType());
+ return GetOrCreateLLVMFunction(getMangledName(GD.getDecl()), Ty, GD);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+ const char *Name) {
+ // Convert Name to be a uniqued string from the IdentifierInfo table.
+ Name = getContext().Idents.get(Name).getName();
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+}
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the global when it is first created.
+llvm::Constant *CodeGenModule::GetOrCreateLLVMGlobal(const char *MangledName,
+ const llvm::PointerType*Ty,
+ const VarDecl *D) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+ if (Entry) {
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "", &getModule(),
+ false, Ty->getAddressSpace());
+ GV->setName(MangledName);
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(D->getType().isConstant(Context));
+
+ // FIXME: Merge with other attribute handling code.
+ if (D->getStorageClass() == VarDecl::PrivateExtern)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+
+ if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return Entry = GV;
+}
+
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ const llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+ return GetOrCreateLLVMGlobal(getMangledName(D), PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+ const char *Name) {
+ // Convert Name to be a uniqued string from the IdentifierInfo table.
+ Name = getContext().Idents.get(Name).getName();
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ const char *MangledName = getMangledName(D);
+ if (GlobalDeclMap.count(MangledName) == 0) {
+ DeferredDecls[MangledName] = GlobalDecl(D);
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
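+
+// A concrete C-level example (hypothetical translation unit): 'counter' is a
+// tentative definition. If use() has already been emitted, the reference has
+// placed the global in GlobalDeclMap, so the code above emits a
+// zero-initialized definition right away; without any reference it would only
+// be parked in DeferredDecls.
+//
+//   int counter;                          /* tentative definition     */
+//   int use(void) { return counter; }     /* reference keeps it alive */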
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+
+ if (D->getInit() == 0) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+ // exists. A use may still exist, however, so we may still need
+ // to do a RAUW.
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(ASTTy));
+ } else {
+ Init = EmitConstantExpr(D->getInit(), D->getType());
+ if (!Init) {
+ ErrorUnsupported(D, "static initializer");
+ QualType T = D->getInit()->getType();
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ }
+
+ const llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
+
+ // Remove the old entry from GlobalDeclMap so that we'll create a new one.
+ GlobalDeclMap.erase(getMangledName(D));
+
+ // Make a new global with the correct type, this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+ GV->takeName(cast<llvm::GlobalValue>(Entry));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
+ SourceManager &SM = Context.getSourceManager();
+ AddAnnotation(EmitAnnotateAttr(GV, AA,
+ SM.getInstantiationLineNumber(D->getLocation())));
+ }
+
+ GV->setInitializer(Init);
+ GV->setConstant(D->getType().isConstant(Context));
+ GV->setAlignment(getContext().getDeclAlignInBytes(D));
+
+ // Set the llvm linkage type as appropriate.
+ if (D->getStorageClass() == VarDecl::Static)
+ GV->setLinkage(llvm::Function::InternalLinkage);
+ else if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::Function::DLLImportLinkage);
+ else if (D->hasAttr<DLLExportAttr>())
+ GV->setLinkage(llvm::Function::DLLExportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>())
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ else if (!CompileOpts.NoCommon &&
+ (!D->hasExternalStorage() && !D->getInit()))
+ GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
+ else
+ GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D->getLocation());
+ DI->EmitGlobalVariable(GV, D);
+ }
+}
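+
+// A concrete C-level example of the replace-and-RAUW path above (hypothetical
+// translation unit): the extern declaration is created with an incomplete
+// array type, the definition needs the complete one, so a fresh GlobalVariable
+// is made, the old one is RAUW'd with a bitcast of the new one, and then
+// erased.
+//
+//   extern int x[];   /* declared with incomplete type  */
+//   int x[10];        /* defined with the complete type */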
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ const llvm::Type *NewRetTy = NewFn->getReturnType();
+ llvm::SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
+ if (!CI) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CI->getNumOperands()-1 == ArgNo ||
+ CI->getOperand(ArgNo+1)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+ }
+ if (DontTransform)
+ continue;
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CI->op_begin()+1, CI->op_begin()+1+ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
+ ArgList.end(), "", CI);
+ ArgList.clear();
+ if (NewCall->getType() != llvm::Type::VoidTy)
+ NewCall->takeName(CI);
+ NewCall->setCallingConv(CI->getCallingConv());
+ NewCall->setAttributes(CI->getAttributes());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+ CI->eraseFromParent();
+ }
+}
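+
+// A concrete C-level example (hypothetical translation unit): the call in
+// bar() is emitted through the old unprototyped declaration of foo; once the
+// prototyped definition appears, the helper above rewrites that call to target
+// the new function directly, so passes like always_inline still see a direct
+// call.
+//
+//   int foo();                        /* unprototyped declaration    */
+//   int bar(void) { return foo(7); }
+//   int foo(int x) { return x; }      /* definition with a prototype */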
+
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const llvm::FunctionType *Ty;
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ bool isVariadic = D->getType()->getAsFunctionProtoType()->isVariadic();
+
+ Ty = getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), isVariadic);
+ } else {
+ Ty = cast<llvm::FunctionType>(getTypes().ConvertType(D->getType()));
+
+ // As a special case, make sure that definitions of K&R functions like
+ // "type foo()" aren't declared as varargs (which forces the backend
+ // to do unnecessary work).
+ if (D->getType()->isFunctionNoProtoType()) {
+ assert(Ty->isVarArg() && "Didn't lower type as expected");
+ // Due to stret, the lowered function could have arguments.
+ // Just create the same type as was lowered by ConvertType
+ // but strip off the varargs bit.
+ std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
+ Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
+ }
+ }
+
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // F is the Function* for the one with the wrong type, we must make a new
+ // Function* and update everything that used F (a declaration) with the new
+ // Function* (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Start by making a new function of the
+ // correct type, RAUW, then steal the name.
+ GlobalDeclMap.erase(getMangledName(D));
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+ NewFn->takeName(OldFn);
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+ // of the new function.
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of F with the Function we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+}
+
+void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Unique the name through the identifier table.
+ const char *AliaseeName = AA->getAliasee().c_str();
+ AliaseeName = getContext().Idents.get(AliaseeName).getName();
+
+ // Create a reference to the named value. This ensures that it is emitted
+ // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AliaseeName, DeclTy, GlobalDecl());
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AliaseeName,
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ // See if there is already something with the alias' name in the module.
+ const char *MangledName = getMangledName(D);
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+
+ if (Entry && !Entry->isDeclaration()) {
+ // If there is a definition in the module, then it wins over the alias.
+ // This is dubious, but we allow it to be safe; just ignore the alias.
+ GA->eraseFromParent();
+ return;
+ }
+
+ if (Entry) {
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ }
+
+ // Now we know that there is no conflict, set the name.
+ Entry = GA;
+ GA->setName(MangledName);
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->getBody(getContext()))
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakImportAttr>()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(unsigned BuiltinID) {
+ assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+ "isn't a lib fn");
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
+ if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+ Name += 10;
+
+ // Get the type for the builtin.
+ Builtin::Context::GetBuiltinTypeError Error;
+ QualType Type = Context.BuiltinInfo.GetBuiltinType(BuiltinID, Context, Error);
+ assert(Error == Builtin::Context::GE_None && "Can't get builtin type");
+
+ const llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(Type));
+
+ // Unique the name through the identifier table.
+ Name = getContext().Idents.get(Name).getName();
+ // FIXME: param attributes for sext/zext etc.
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
+ unsigned NumTys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(),
+ (llvm::Intrinsic::ID)IID, Tys, NumTys);
+}
+
+llvm::Function *CodeGenModule::getMemCpyFn() {
+ if (MemCpyFn) return MemCpyFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemCpyFn = getIntrinsic(llvm::Intrinsic::memcpy, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemMoveFn() {
+ if (MemMoveFn) return MemMoveFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemMoveFn = getIntrinsic(llvm::Intrinsic::memmove, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemSetFn() {
+ if (MemSetFn) return MemSetFn;
+ const llvm::Type *IntPtr = TheTargetData.getIntPtrType();
+ return MemSetFn = getIntrinsic(llvm::Intrinsic::memset, &IntPtr, 1);
+}
+
+static void appendFieldAndPadding(CodeGenModule &CGM,
+ std::vector<llvm::Constant*>& Fields,
+ FieldDecl *FieldD, FieldDecl *NextFieldD,
+ llvm::Constant* Field,
+ RecordDecl* RD, const llvm::StructType *STy) {
+ // Append the field.
+ Fields.push_back(Field);
+
+ int StructFieldNo = CGM.getTypes().getLLVMFieldNo(FieldD);
+
+ int NextStructFieldNo;
+ if (!NextFieldD) {
+ NextStructFieldNo = STy->getNumElements();
+ } else {
+ NextStructFieldNo = CGM.getTypes().getLLVMFieldNo(NextFieldD);
+ }
+
+ // Append padding
+ for (int i = StructFieldNo + 1; i < NextStructFieldNo; i++) {
+ llvm::Constant *C =
+ llvm::Constant::getNullValue(STy->getElementType(StructFieldNo + 1));
+
+ Fields.push_back(C);
+ }
+}
+
+llvm::Constant *CodeGenModule::
+GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ std::string str;
+ unsigned StringLength = 0;
+
+ bool isUTF16 = false;
+ if (Literal->containsNonAsciiOrNull()) {
+ // Convert from UTF-8 to UTF-16.
+ llvm::SmallVector<UTF16, 128> ToBuf(Literal->getByteLength());
+ const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result;
+ Result = ConvertUTF8toUTF16(&FromPtr, FromPtr+Literal->getByteLength(),
+ &ToPtr, ToPtr+Literal->getByteLength(),
+ strictConversion);
+ if (Result == conversionOK) {
+ // FIXME: Storing UTF-16 in a C string is a hack to test Unicode strings
+ // without doing more surgery to this routine. Since we aren't explicitly
+ // checking for endianness here, it's also a bug (when generating code for
+ // a target that doesn't match the host endianness). Modeling this as an
+ // i16 array is likely the cleanest solution.
+ StringLength = ToPtr-&ToBuf[0];
+ str.assign((char *)&ToBuf[0], StringLength*2); // Twice as many UTF-8 chars.
+ isUTF16 = true;
+ } else if (Result == sourceIllegal) {
+ // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string.
+ str.assign(Literal->getStrData(), Literal->getByteLength());
+ StringLength = str.length();
+ } else
+ assert(Result == conversionOK && "UTF-8 to UTF-16 conversion failed");
+
+ } else {
+ str.assign(Literal->getStrData(), Literal->getByteLength());
+ StringLength = str.length();
+ }
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ CFConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(llvm::Type::Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ if (!CFConstantStringClassRef) {
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+
+ // FIXME: This is fairly broken if __CFConstantStringClassReference is
+ // already defined, in that it will get renamed and the user will most
+ // likely see an opaque error message. This is a general issue with relying
+ // on particular names.
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(Ty, false,
+ llvm::GlobalVariable::ExternalLinkage, 0,
+ "__CFConstantStringClassReference",
+ &getModule());
+
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+ RecordDecl *CFRD = CFTy->getAsRecordType()->getDecl();
+
+ const llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ std::vector<llvm::Constant*> Fields;
+ RecordDecl::field_iterator Field = CFRD->field_begin(getContext());
+
+ // Class pointer.
+ FieldDecl *CurField = *Field++;
+ FieldDecl *NextField = *Field++;
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ CFConstantStringClassRef, CFRD, STy);
+
+ // Flags.
+ CurField = NextField;
+ NextField = *Field++;
+ const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0)
+ : llvm::ConstantInt::get(Ty, 0x07C8),
+ CFRD, STy);
+
+ // String pointer.
+ CurField = NextField;
+ NextField = *Field++;
+ llvm::Constant *C = llvm::ConstantArray::get(str);
+
+ const char *Sect, *Prefix;
+ bool isConstant;
+ if (isUTF16) {
+ Prefix = getContext().Target.getUnicodeStringSymbolPrefix();
+ Sect = getContext().Target.getUnicodeStringSection();
+ // FIXME: Why does GCC not set constant here?
+ isConstant = false;
+ } else {
+ Prefix = getContext().Target.getStringSymbolPrefix(true);
+ Sect = getContext().Target.getCFStringDataSection();
+ // FIXME: -fwritable-strings should probably affect this, but we
+ // are following gcc here.
+ isConstant = true;
+ }
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(C->getType(), isConstant,
+ llvm::GlobalValue::InternalLinkage,
+ C, Prefix, &getModule());
+ if (Sect)
+ GV->setSection(Sect);
+ if (isUTF16) {
+ unsigned Align = getContext().getTypeAlign(getContext().ShortTy)/8;
+ GV->setAlignment(Align);
+ }
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2),
+ CFRD, STy);
+
+ // String length.
+ CurField = NextField;
+ NextField = 0;
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ appendFieldAndPadding(*this, Fields, CurField, NextField,
+ llvm::ConstantInt::get(Ty, StringLength), CFRD, STy);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(C->getType(), true,
+ llvm::GlobalVariable::InternalLinkage, C,
+ getContext().Target.getCFStringSymbolPrefix(),
+ &getModule());
+ if (const char *Sect = getContext().Target.getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+/// GetStringForStringLiteral - Return the appropriate bytes for a
+/// string literal, properly padded to match the literal type.
+std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+ const char *StrData = E->getStrData();
+ unsigned Len = E->getByteLength();
+
+ const ConstantArrayType *CAT =
+ getContext().getAsConstantArrayType(E->getType());
+ assert(CAT && "String isn't pointer or array!");
+
+ // Resize the string to the right size.
+ std::string Str(StrData, StrData+Len);
+ uint64_t RealLen = CAT->getSize().getZExtValue();
+
+ if (E->isWide())
+ RealLen *= getContext().Target.getWCharWidth()/8;
+
+ Str.resize(RealLen, '\0');
+
+ return Str;
+}
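+
+// A concrete C-level example (hypothetical declaration): the literal "hi"
+// supplies two bytes but the array type is char[8], so the returned string is
+// padded with '\0' out to eight bytes; for a wide literal the length is also
+// scaled by the target's wchar_t width.
+//
+//   char buf[8] = "hi";   /* GetStringForStringLiteral returns "hi" + 6 NULs */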
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ // FIXME: This can be more efficient.
+ return GetAddrOfConstantString(GetStringForStringLiteral(S));
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+
+/// GenerateStringLiteral - Creates storage for a string literal.
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C = llvm::ConstantArray::get(str, false);
+
+ // Create a global variable for this string
+ return new llvm::GlobalVariable(C->getType(), constant,
+ llvm::GlobalValue::InternalLinkage,
+ C, GlobalName, &CGM.getModule());
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. Its contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// Features.WritableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+ const char *GlobalName) {
+ bool IsConstant = !Features.WritableStrings;
+
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = getContext().Target.getStringSymbolPrefix(IsConstant);
+
+ // Don't share any string literals if strings aren't constant.
+ if (!IsConstant)
+ return GenerateStringLiteral(str, false, *this, GlobalName);
+
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
+ ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+ if (Entry.getValue())
+ return Entry.getValue();
+
+ // Create a global variable for this.
+ llvm::Constant *C = GenerateStringLiteral(str, true, *this, GlobalName);
+ Entry.setValue(C);
+ return C;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName){
+ return GetAddrOfConstantString(str + '\0', GlobalName);
+}
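+
+// Illustrative call sites (hypothetical): the two entry points differ only in
+// the implicit terminator; both return a pointer-to-array constant (or a
+// writable global when -fwritable-strings is in effect).
+//
+//   llvm::Constant *Raw  = CGM.GetAddrOfConstantString("abc");   // 3 bytes, no '\0'
+//   llvm::Constant *CStr = CGM.GetAddrOfConstantCString("abc");  // 4 bytes, trailing '\0'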
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(getContext()),
+ e = D->propimpl_end(getContext()); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ // Determine which methods need to be implemented; some may have
+ // been overridden. Note that ::isSynthesized is not the method
+ // we want; that just indicates whether the decl came from a
+ // property. What we want to know is whether the method is defined
+ // in this implementation.
+ if (!D->getInstanceMethod(getContext(), PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(getContext(), PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(getContext()),
+ E = ND->decls_end(getContext());
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(getContext()),
+ E = LSD->decls_end(getContext());
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXMethod:
+ case Decl::Function:
+ case Decl::Var:
+ EmitGlobal(GlobalDecl(cast<ValueDecl>(D)));
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ case Decl::CXXConstructor:
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCClass:
+ case Decl::ObjCForwardProtocol:
+ case Decl::ObjCCategory:
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCProtocol:
+ Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+ break;
+
+ case Decl::ObjCCategoryImpl:
+ // Categories have properties but don't support synthesize so we
+ // can ignore them here.
+ Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ EmitObjCPropertyImplementations(OMD);
+ Runtime->GenerateClass(OMD);
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody(getContext()))
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ // compatibility-alias is a directive and has no code gen.
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ std::string AsmString(AD->getAsmString()->getStrData(),
+ AD->getAsmString()->getByteLength());
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString);
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should; every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..4d50e89
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,467 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "CGBlocks.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CodeGenTypes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/ValueHandle.h"
+#include <list>
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+}
+
+namespace clang {
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CompileOptions;
+ class Diagnostic;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+
+namespace CodeGen {
+
+ class CodeGenFunction;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+
+/// GlobalDecl - represents a global declaration. This can either be a
+/// CXXConstructorDecl and the constructor type (Base, Complete),
+/// a CXXDestructorDecl and the destructor type (Base, Complete), or
+/// a regular VarDecl or a FunctionDecl.
+class GlobalDecl {
+ llvm::PointerIntPair<const ValueDecl*, 2> Value;
+
+public:
+ GlobalDecl() {}
+
+ explicit GlobalDecl(const ValueDecl *VD) : Value(VD, 0) {
+ assert(!isa<CXXConstructorDecl>(VD) && "Use other ctor with ctor decls!");
+ assert(!isa<CXXDestructorDecl>(VD) && "Use other ctor with dtor decls!");
+ }
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ : Value(D, Type) {}
+ GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+ : Value(D, Type) {}
+
+ const ValueDecl *getDecl() const { return Value.getPointer(); }
+
+ CXXCtorType getCtorType() const {
+ assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+ return static_cast<CXXCtorType>(Value.getInt());
+ }
+
+ CXXDtorType getDtorType() const {
+ assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+ return static_cast<CXXDtorType>(Value.getInt());
+ }
+};
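+
+// Illustrative uses (hypothetical; the decl variables and enumerators shown
+// are placeholders): plain functions and variables use the one-argument form,
+// while C++ constructors and destructors carry their Base/Complete variant in
+// the PointerIntPair's integer bits.
+//
+//   GlobalDecl GD1(SomeFunctionDecl);              // ordinary function or variable
+//   GlobalDecl GD2(SomeCtorDecl, Ctor_Complete);   // complete-object constructor
+//   GlobalDecl GD3(SomeDtorDecl, Dtor_Base);       // base-object destructor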
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public BlockModule {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector< std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &Features;
+ const CompileOptions &CompileOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ Diagnostic &Diags;
+ CodeGenTypes Types;
+ CGObjCRuntime* Runtime;
+ CGDebugInfo* DebugInfo;
+
+ llvm::Function *MemCpyFn;
+ llvm::Function *MemMoveFn;
+ llvm::Function *MemSetFn;
+
+ /// GlobalDeclMap - Mapping of decl names (represented as unique
+ /// character pointers from either the identifier table or the set
+ /// of mangled names) to global variables we have already
+ /// emitted. Note that the entries in this map are the actual
+ /// globals and therefore may not be of the same type as the decl,
+ /// they should be bitcasted on retrieval. Also note that the
+ /// globals are keyed on their source mangled name, not the global name
+ /// (which may change with attributes such as asm-labels). The key
+ /// to this map should be generated using getMangledName().
+ ///
+ /// Note that this map always lines up exactly with the contents of the LLVM
+ /// IR symbol table, but this is quicker to query since it is doing uniqued
+ /// pointer lookups instead of full string lookups.
+ llvm::DenseMap<const char*, llvm::GlobalValue*> GlobalDeclMap;
+
+ /// \brief Contains the strings used for mangled names.
+ ///
+ /// FIXME: Eventually, this should map from the semantic/canonical
+ /// declaration for each global entity to its mangled name (if it
+ /// has one).
+ llvm::StringSet<> MangledNames;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet. The key to this map is a uniqued mangled name.
+ llvm::DenseMap<const char*, GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ std::vector<llvm::Constant*> Annotations;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::Constant*> ConstantStringMap;
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+public:
+ CodeGenModule(ASTContext &C, const CompileOptions &CompileOpts,
+ llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ assert(Runtime && "No Objective-C runtime has been configured.");
+ return *Runtime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!Runtime; }
+
+ CGDebugInfo *getDebugInfo() { return DebugInfo; }
+ ASTContext &getContext() const { return Context; }
+ const CompileOptions &getCompileOpts() const { return CompileOpts; }
+ const LangOptions &getLangOptions() const { return Features; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ Diagnostic &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+
+ /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
+ LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+ /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ const llvm::Type *Ty = 0);
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ const llvm::Type *Ty = 0);
+
+ /// GetStringForStringLiteral - Return the appropriate bytes for a string
+ /// literal, properly padded to match the literal type. If only the address
+ /// of a constant is needed, consider using GetAddrOfConstantStringFromLiteral.
+ std::string GetStringForStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// GetAddrOfConstantString - Returns a pointer to a character array
+ /// containing the literal. Its contents are exactly those of the given
+ /// string, i.e. it will not be null terminated automatically; see
+ /// GetAddrOfConstantCString. Note that whether the result is actually a
+ /// pointer to an LLVM constant depends on Features.WritableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(const std::string& str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0);
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+
+ /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+ /// given type.
+ llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(unsigned BuiltinID);
+
+ llvm::Function *getMemCpyFn();
+ llvm::Function *getMemMoveFn();
+ llvm::Function *getMemSetFn();
+ llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
+ unsigned NumTys = 0);
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
+
+ void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+ const char *Name);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+ const char *Name);
+
+ void UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+ }
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA, unsigned LineNo);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+ /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL);
+
+ const char *getMangledName(const GlobalDecl &D);
+
+ const char *getMangledName(const NamedDecl *ND);
+ const char *getMangledCXXCtorName(const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const char *getMangledCXXDtorName(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ enum GVALinkage {
+ GVA_Internal,
+ GVA_C99Inline,
+ GVA_CXXInline,
+ GVA_StrongExternal
+ };
+
+private:
+ /// UniqueMangledName - Unique a name by (if necessary) inserting it into the
+ /// MangledNames string map.
+ const char *UniqueMangledName(const char *NameStart, const char *NameEnd);
+
+ llvm::Constant *GetOrCreateLLVMFunction(const char *MangledName,
+ const llvm::Type *Ty,
+ GlobalDecl D);
+ llvm::Constant *GetOrCreateLLVMGlobal(const char *MangledName,
+ const llvm::PointerType *PTy,
+ const VarDecl *D);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(const FunctionDecl *FD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+ /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ void EmitAliasDefinition(const ValueDecl *D);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+ /// suitable for use as a LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ void EmitAnnotations(void);
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+ /// references to global which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..af791f6
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,614 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+
+#include "CGCall.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+ /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
+ /// structs and unions. It manages transient information used during layout.
+ /// FIXME: Handle field alignments. Handle packed structs.
+ class RecordOrganizer {
+ public:
+ explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
+ CGT(Types), RD(Record), STy(NULL) {}
+
+ /// layoutStructFields - Do the actual work and lay out all fields. Create
+ /// corresponding llvm struct type. This should be invoked only after
+ /// all fields are added.
+ void layoutStructFields(const ASTRecordLayout &RL);
+
+ /// layoutUnionFields - Do the actual work and lay out all fields. Create
+ /// corresponding llvm struct type. This should be invoked only after
+ /// all fields are added.
+ void layoutUnionFields(const ASTRecordLayout &RL);
+
+ /// getLLVMType - Return associated llvm struct type. This may be NULL
+ /// if fields are not laid out.
+ llvm::Type *getLLVMType() const {
+ return STy;
+ }
+
+ llvm::SmallSet<unsigned, 8> &getPaddingFields() {
+ return PaddingFields;
+ }
+
+ private:
+ CodeGenTypes &CGT;
+ const RecordDecl& RD;
+ llvm::Type *STy;
+ llvm::SmallSet<unsigned, 8> PaddingFields;
+ };
+}
+
+CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
+ const llvm::TargetData &TD)
+ : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+ TheABIInfo(0) {
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for(llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+ CGRecordLayouts.clear();
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ llvm::PATypeHolder Result = ConvertTypeRecursive(T);
+
+ // Any pointers that were converted deferred evaluation of their pointee
+ // type, creating an opaque type instead. This is in order to avoid problems
+ // with circular types. Loop through all these deferred pointees, if any,
+ // and resolve them now.
+ while (!PointersToResolve.empty()) {
+ std::pair<QualType, llvm::OpaqueType*> P =
+ PointersToResolve.back();
+ PointersToResolve.pop_back();
+ // We can handle bare pointers here because we know that the only pointers
+ // to the Opaque type are P.second and from other types. Refining the
+ // opaque type away will invalidate P.second, but we don't mind :).
+ const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
+ P.second->refineAbstractTypeTo(NT);
+ }
+
+ return Result;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ // See if type is already cached.
+ llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+ I = TypeCache.find(T.getTypePtr());
+ // If the type is found in the map and this is not a definition for an
+ // opaque placeholder type, then use it. Otherwise, convert type T.
+ if (I != TypeCache.end())
+ return I->second.get();
+
+ const llvm::Type *ResultType = ConvertNewType(T);
+ TypeCache.insert(std::make_pair(T.getTypePtr(),
+ llvm::PATypeHolder(ResultType)));
+ return ResultType;
+}
+
+const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
+ const llvm::Type *ResultType = ConvertTypeRecursive(T);
+ if (ResultType == llvm::Type::Int1Ty)
+ return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
+ return ResultType;
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+ const llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (R != llvm::Type::Int1Ty)
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
+
+}
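+
+// Illustrative sketch: C's _Bool is where the two entry points diverge, since
+// its scalar form and its in-memory form differ.
+//
+//   ConvertType(Context.BoolTy)        -> i1  (scalar/SSA form)
+//   ConvertTypeForMem(Context.BoolTy)  -> i8  (assuming an 8-bit bool on the target)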
+
+// Code to verify a given function type is complete, i.e. the return type
+// and all of the argument types are complete.
+static const TagType *VerifyFuncTypeComplete(const Type* T) {
+ const FunctionType *FT = cast<FunctionType>(T);
+ if (const TagType* TT = FT->getResultType()->getAsTagType())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
+ for (unsigned i = 0; i < FPT->getNumArgs(); i++)
+ if (const TagType* TT = FPT->getArgType(i)->getAsTagType())
+ if (!TT->getDecl()->isDefinition())
+ return TT;
+ return 0;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+ if (TDTI == TagDeclTypes.end()) return;
+
+ // Remember the opaque LLVM type for this tagdecl.
+ llvm::PATypeHolder OpaqueHolder = TDTI->second;
+ assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
+ "Updating compilation of an already non-opaque type?");
+
+ // Remove it from TagDeclTypes so that it will be regenerated.
+ TagDeclTypes.erase(TDTI);
+
+ // Generate the new type.
+ const llvm::Type *NT = ConvertTagDeclType(TD);
+
+ // Refine the old opaque type to its new definition.
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
+
+ // Since we just completed a tag type, check to see if any function types
+ // were completed along with the tag type.
+ // FIXME: This is very inefficient; if we track which function types depend
+ // on which tag types, though, it should be reasonably efficient.
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
+ for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
+ if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
+ // This function type still depends on an incomplete tag type; make sure
+ // that tag type has an associated opaque type.
+ ConvertTagDeclType(TT->getDecl());
+ } else {
+ // This function no longer depends on an incomplete tag type; create the
+ // function type, and refine the opaque type to the new function type.
+ llvm::PATypeHolder OpaqueHolder = i->second;
+ const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
+ cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
+ FunctionTypes.erase(i);
+ }
+ }
+}
+
+static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::FloatTy;
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::DoubleTy;
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::FP128Ty;
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::PPC_FP128Ty;
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::X86_FP80Ty;
+ assert(0 && "Unknown float format!");
+ return 0;
+}
+
+const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
+ const clang::Type &Ty = *Context.getCanonicalType(T);
+
+ switch (Ty.getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Non-canonical or dependent types aren't possible.");
+ break;
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty).getKind()) {
+ default: assert(0 && "Unknown builtin type!");
+ case BuiltinType::Void:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ return llvm::IntegerType::get(8);
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ return llvm::Type::Int1Ty;
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar:
+ return llvm::IntegerType::get(
+ static_cast<unsigned>(Context.getTypeSize(T)));
+
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ return getTypeForFormat(Context.getFloatTypeSemantics(T));
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ return llvm::IntegerType::get(128);
+ }
+ break;
+ }
+ case Type::FixedWidthInt:
+ return llvm::IntegerType::get(cast<FixedWidthIntType>(T)->getWidth());
+ case Type::Complex: {
+ const llvm::Type *EltTy =
+ ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
+ return llvm::StructType::get(EltTy, EltTy, NULL);
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType &RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+ case Type::Pointer: {
+ const PointerType &PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy.getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+ return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType &A = cast<VariableArrayType>(Ty);
+ assert(A.getIndexTypeQualifier() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ return ConvertTypeForMemRecursive(A.getElementType());
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
+ assert(A.getIndexTypeQualifier() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int]
+ return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
+ const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
+ return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType &VT = cast<VectorType>(Ty);
+ return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
+ VT.getNumElements());
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ // First, check whether we can build the full function type.
+ if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+ // This function's type depends on an incomplete tag type; make sure
+ // we have an opaque type corresponding to the tag type.
+ ConvertTagDeclType(TT->getDecl());
+ // Create an opaque type for this function type, save it, and return it.
+ llvm::Type *ResultType = llvm::OpaqueType::get();
+ FunctionTypes.insert(std::make_pair(&Ty, ResultType));
+ return ResultType;
+ }
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
+ return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());
+
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+ return GetFunctionType(getFunctionInfo(FNPT), true);
+ }
+
+ case Type::ExtQual:
+ return
+ ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));
+
+ case Type::ObjCQualifiedInterface: {
+ // Lower foo<P1,P2> just like foo.
+ ObjCInterfaceDecl *ID = cast<ObjCQualifiedInterfaceType>(Ty).getDecl();
+ return ConvertTypeRecursive(Context.getObjCInterfaceType(ID));
+ }
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+ if (!T)
+ T = llvm::OpaqueType::get();
+ return T;
+ }
+
+ case Type::ObjCQualifiedId:
+ // Protocols don't influence the LLVM type.
+ return ConvertTypeRecursive(Context.getObjCIdType());
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *TD = cast<TagType>(Ty).getDecl();
+ const llvm::Type *Res = ConvertTagDeclType(TD);
+
+ std::string TypeName(TD->getKindName());
+ TypeName += '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (TD->getIdentifier())
+ TypeName += TD->getNameAsString();
+ else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+ TypeName += TdT->getDecl()->getNameAsString();
+ else
+ TypeName += "anon";
+
+ TheModule.addTypeName(TypeName, Res);
+ return Res;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
+ llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
+ PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+ return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+ }
+
+ case Type::MemberPointer: {
+ // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
+ // If we ever want to support other ABIs this needs to be abstracted.
+
+ QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
+ if (ETy->isFunctionType()) {
+ return llvm::StructType::get(ConvertType(Context.getPointerDiffType()),
+ ConvertType(Context.getPointerDiffType()),
+ NULL);
+ } else
+ return ConvertType(Context.getPointerDiffType());
+ }
+
+ case Type::TemplateSpecialization:
+ assert(false && "Dependent types can't get here");
+ }
+
+ // FIXME: implement.
+ return llvm::OpaqueType::get();
+}
+
+/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+/// enum.
+const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+ // TagDecls are not necessarily unique; instead, use the (clang)
+ // type connected to the decl.
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ TagDeclTypes.find(Key);
+
+ // If we've already compiled this tag type, use the previous definition.
+ if (TDTI != TagDeclTypes.end())
+ return TDTI->second;
+
+ // If this is still a forward definition, just define an opaque type to use
+ // for this tagged decl.
+ if (!TD->isDefinition()) {
+ llvm::Type *ResultType = llvm::OpaqueType::get();
+ TagDeclTypes.insert(std::make_pair(Key, ResultType));
+ return ResultType;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+
+ if (TD->isEnum()) {
+ // Don't bother storing enums in TagDeclTypes.
+ return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+ }
+
+ // This decl could well be recursive. In this case, insert an opaque
+ // definition of this type, which the recursive uses will get. We will then
+ // refine this opaque version later.
+
+ // Create new OpaqueType now for later use in case this is a recursive
+ // type. This will later be refined to the actual type.
+ llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
+ TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+
+ const llvm::Type *ResultType;
+ const RecordDecl *RD = cast<const RecordDecl>(TD);
+
+ // There isn't any extra information for empty structures/unions.
+ if (RD->field_empty(getContext())) {
+ ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
+ } else {
+ // Layout fields.
+ RecordOrganizer RO(*this, *RD);
+
+ if (TD->isStruct() || TD->isClass())
+ RO.layoutStructFields(Context.getASTRecordLayout(RD));
+ else {
+ assert(TD->isUnion() && "unknown tag decl kind!");
+ RO.layoutUnionFields(Context.getASTRecordLayout(RD));
+ }
+
+ // Get llvm::StructType.
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ CGRecordLayouts[Key] = new CGRecordLayout(RO.getLLVMType(),
+ RO.getPaddingFields());
+ ResultType = RO.getLLVMType();
+ }
+
+ // Refine our Opaque type to ResultType. This can invalidate ResultType, so
+ // make sure to read the result out of the holder.
+ cast<llvm::OpaqueType>(ResultHolder.get())
+ ->refineAbstractTypeTo(ResultType);
+
+ return ResultHolder.get();
+}
+
+/// getLLVMFieldNo - Return llvm::StructType element number
+/// that corresponds to the field FD.
+unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
+ llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
+ assert (I != FieldInfo.end() && "Unable to find field info");
+ return I->second;
+}
+
+/// addFieldInfo - Assign field number to field FD.
+void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
+ FieldInfo[FD] = No;
+}
+
+/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
+CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
+ I = BitFields.find(FD);
+ assert (I != BitFields.end() && "Unable to find bitfield info");
+ return I->second;
+}
+
+/// addBitFieldInfo - Assign a start bit and a size to field FD.
+void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
+ unsigned Size) {
+ BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
+}
+
+/// getCGRecordLayout - Return record layout info for the given tag decl.
+const CGRecordLayout *
+CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
+ const Type *Key =
+ Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
+ llvm::DenseMap<const Type*, CGRecordLayout *>::iterator I
+ = CGRecordLayouts.find(Key);
+ assert (I != CGRecordLayouts.end()
+ && "Unable to find record layout information for type");
+ return I->second;
+}
+
+/// layoutStructFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type.
+/// Note that this doesn't actually try to do struct layout; it depends on
+/// the layout built by the AST. (We have to do struct layout to do Sema,
+/// and there's no point in duplicating the work.)
+void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
+ // FIXME: This code currently always generates packed structures.
+ // Unpacked structures are more readable, and sometimes more efficient!
+ // (But note that any changes here are likely to impact CGExprConstant,
+ // which makes some messy assumptions.)
+ uint64_t llvmSize = 0;
+ // FIXME: Make this a SmallVector
+ std::vector<const llvm::Type*> LLVMFields;
+
+ unsigned curField = 0;
+ for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
+ FieldEnd = RD.field_end(CGT.getContext());
+ Field != FieldEnd; ++Field) {
+ uint64_t offset = RL.getFieldOffset(curField);
+ const llvm::Type *Ty = CGT.ConvertTypeForMemRecursive(Field->getType());
+ uint64_t size = CGT.getTargetData().getTypeAllocSizeInBits(Ty);
+
+ if (Field->isBitField()) {
+ uint64_t BitFieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGT.getContext()).getZExtValue();
+
+ // Bitfield field info is different from other field info;
+ // it actually ignores the underlying LLVM struct because
+ // there isn't any convenient mapping.
+ CGT.addFieldInfo(*Field, offset / size);
+ CGT.addBitFieldInfo(*Field, offset % size, BitFieldSize);
+ } else {
+ // Put the element into the struct. This would be simpler
+ // if we didn't bother, but it seems a bit too strange to
+ // allocate all structs as i8 arrays.
+ while (llvmSize < offset) {
+ LLVMFields.push_back(llvm::Type::Int8Ty);
+ llvmSize += 8;
+ }
+
+ llvmSize += size;
+ CGT.addFieldInfo(*Field, LLVMFields.size());
+ LLVMFields.push_back(Ty);
+ }
+ ++curField;
+ }
+
+ while (llvmSize < RL.getSize()) {
+ LLVMFields.push_back(llvm::Type::Int8Ty);
+ llvmSize += 8;
+ }
+
+ STy = llvm::StructType::get(LLVMFields, true);
+ assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
+}
+
+/// layoutUnionFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type. This should be invoked only after
+/// all fields are added.
+void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
+ unsigned curField = 0;
+ for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
+ FieldEnd = RD.field_end(CGT.getContext());
+ Field != FieldEnd; ++Field) {
+ // The offset should usually be zero, but bitfields could be strange
+ uint64_t offset = RL.getFieldOffset(curField);
+ CGT.ConvertTypeRecursive(Field->getType());
+
+ if (Field->isBitField()) {
+ Expr *BitWidth = Field->getBitWidth();
+ uint64_t BitFieldSize =
+ BitWidth->EvaluateAsInt(CGT.getContext()).getZExtValue();
+
+ CGT.addFieldInfo(*Field, 0);
+ CGT.addBitFieldInfo(*Field, offset, BitFieldSize);
+ } else {
+ CGT.addFieldInfo(*Field, 0);
+ }
+ ++curField;
+ }
+
+ // This looks stupid, but it is correct in the sense that
+ // it works no matter how complicated the sizes and alignments
+ // of the union elements are. The natural alignment
+ // of the result doesn't matter because anyone allocating
+ // structures should be aligning them appropriately anyway.
+ // FIXME: We can be a bit more intuitive in a lot of cases.
+ // FIXME: Make this a struct type to work around PR2399; the
+ // C backend doesn't like structs using array types.
+ std::vector<const llvm::Type*> LLVMFields;
+ LLVMFields.push_back(llvm::ArrayType::get(llvm::Type::Int8Ty,
+ RL.getSize() / 8));
+ STy = llvm::StructType::get(LLVMFields, true);
+ assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
+}
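As a worked illustration of the layout code above, consider what the record organizer produces for a simple struct and union, assuming a target where int is 4 bytes with 4-byte alignment and double is 8 bytes; the actual offsets and sizes always come from the ASTRecordLayout, not from this sketch:

  // Hypothetical C input:
  struct S { char c; int i; };
  union  U { char c; double d; };

  // layoutStructFields pads with i8 fields up to each field's AST offset and
  // builds a packed struct, so S lowers to roughly:
  //   <{ i8, i8, i8, i8, i32 }>   ; element 0 = c, elements 1-3 = padding, element 4 = i
  // layoutUnionFields ignores the member types entirely and emits a byte blob
  // sized by the AST layout, wrapped in a packed struct:
  //   <{ [8 x i8] }>              ; RL.getSize() == 64 bits for U on this target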
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..b72d8e9
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,212 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include <vector>
+
+#include "CGCall.h"
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class OpaqueType;
+ class PATypeHolder;
+ class TargetData;
+ class Type;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ class CXXMethodDecl;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+
+namespace CodeGen {
+ class CodeGenTypes;
+
+ /// CGRecordLayout - This class handles struct and union layout info while
+ /// lowering AST types to LLVM types.
+ class CGRecordLayout {
+ CGRecordLayout(); // DO NOT IMPLEMENT
+ public:
+ CGRecordLayout(llvm::Type *T, llvm::SmallSet<unsigned, 8> &PF)
+ : STy(T), PaddingFields(PF) {
+ // FIXME: Collect info about fields that require adjustments
+ // (i.e. fields that do not directly map to llvm struct fields).
+ }
+
+ /// getLLVMType - Return llvm type associated with this record.
+ llvm::Type *getLLVMType() const {
+ return STy;
+ }
+
+ bool isPaddingField(unsigned No) const {
+ return PaddingFields.count(No) != 0;
+ }
+
+ unsigned getNumPaddingFields() {
+ return PaddingFields.size();
+ }
+
+ private:
+ llvm::Type *STy;
+ llvm::SmallSet<unsigned, 8> PaddingFields;
+ };
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ ASTContext &Context;
+ TargetInfo &Target;
+ llvm::Module& TheModule;
+ const llvm::TargetData& TheTargetData;
+ mutable const ABIInfo* TheABIInfo;
+
+ llvm::SmallVector<std::pair<QualType,
+ llvm::OpaqueType *>, 8> PointersToResolve;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
+
+ llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+
+ /// CGRecordLayouts - This maps a clang record type to its corresponding
+ /// record layout info.
+ /// FIXME: If CGRecordLayout is less than 16 bytes then inline it in the
+ /// map.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// FieldInfo - This maps a struct field to its corresponding llvm struct
+ /// type field number. This info is populated by the record organizer.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+public:
+ class BitFieldInfo {
+ public:
+ explicit BitFieldInfo(unsigned short B, unsigned short S)
+ : Begin(B), Size(S) {}
+
+ unsigned short Begin;
+ unsigned short Size;
+ };
+
+private:
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
+
+ /// TypeCache - This map caches llvm::Types (through PATypeHolder), keyed by
+ /// the corresponding clang::Type. llvm::PATypeHolder is used instead of
+ /// llvm::Type because it allows us to bypass potential dangling type
+ /// pointers due to type refinement on the LLVM side.
+ llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+ /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+ /// method directly because it does not do any type caching. This method
+ /// is available only for ConvertType(). ConvertType() is the preferred
+ /// interface to convert type T into a llvm::Type.
+ const llvm::Type *ConvertNewType(QualType T);
+public:
+ CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const;
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ const llvm::Type *ConvertType(QualType T);
+ const llvm::Type *ConvertTypeRecursive(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ const llvm::Type *ConvertTypeForMem(QualType T);
+ const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+ bool IsVariadic);
+
+ const CGRecordLayout *getCGRecordLayout(const TagDecl*) const;
+
+ /// getLLVMFieldNo - Return llvm::StructType element number
+ /// that corresponds to the field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD);
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ /// getFunctionInfo - Get the CGFunctionInfo for this function signature.
+ const CGFunctionInfo &getFunctionInfo(QualType RetTy,
+ const llvm::SmallVector<QualType,16>
+ &ArgTys);
+
+ const CGFunctionInfo &getFunctionInfo(const FunctionNoProtoType *FTNP);
+ const CGFunctionInfo &getFunctionInfo(const FunctionProtoType *FTP);
+ const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
+ const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CallArgList &Args);
+public:
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// addFieldInfo - Assign field number to field FD.
+ void addFieldInfo(const FieldDecl *FD, unsigned No);
+
+ /// addBitFieldInfo - Assign a start bit and a size to field FD.
+ void addBitFieldInfo(const FieldDecl *FD, unsigned Begin, unsigned Size);
+
+ /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
+ /// FD.
+ BitFieldInfo getBitFieldInfo(const FieldDecl *FD);
+
+ /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+ /// enum.
+ const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+
+ /// GetExpandedTypes - Expand the type \arg Ty into the LLVM argument types
+ /// it would be passed as, appending them to the provided vector \arg
+ /// ArgTys. See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
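A minimal usage sketch of the scalar/memory split declared above. Types and Ctx are hypothetical locals standing in for a CodeGenTypes instance and its ASTContext; only the two conversion calls are the API from this header:

  // _Bool is i1 as a scalar value, but is widened for storage.
  const llvm::Type *Scalar = Types.ConvertType(Ctx.BoolTy);       // i1
  const llvm::Type *InMem  = Types.ConvertTypeForMem(Ctx.BoolTy); // typically i8
                                                                  // (target-dependent)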
diff --git a/lib/CodeGen/Makefile b/lib/CodeGen/Makefile
new file mode 100644
index 0000000..e716fe7
--- /dev/null
+++ b/lib/CodeGen/Makefile
@@ -0,0 +1,23 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangCodeGen
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
new file mode 100644
index 0000000..6ee1223
--- /dev/null
+++ b/lib/CodeGen/Mangle.cpp
@@ -0,0 +1,772 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN CXXNameMangler {
+ ASTContext &Context;
+ llvm::raw_ostream &Out;
+
+ const CXXMethodDecl *Structor;
+ unsigned StructorType;
+ CXXCtorType CtorType;
+
+ public:
+ CXXNameMangler(ASTContext &C, llvm::raw_ostream &os)
+ : Context(C), Out(os), Structor(0), StructorType(0) { }
+
+ bool mangle(const NamedDecl *D);
+ void mangleGuardVariable(const VarDecl *D);
+
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ private:
+ bool mangleFunctionDecl(const FunctionDecl *FD);
+
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleUnqualifiedName(const NamedDecl *ND);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleNestedName(const NamedDecl *ND);
+ void manglePrefix(const DeclContext *DC);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleCVQualifiers(unsigned Quals);
+ void mangleType(QualType T);
+ void mangleType(const BuiltinType *T);
+ void mangleType(const FunctionType *T);
+ void mangleBareFunctionType(const FunctionType *T, bool MangleReturnType);
+ void mangleType(const TagType *T);
+ void mangleType(const ArrayType *T);
+ void mangleType(const MemberPointerType *T);
+ void mangleType(const TemplateTypeParmType *T);
+ void mangleType(const ObjCInterfaceType *T);
+ void mangleExpression(Expr *E);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgumentList(const TemplateArgumentList &L);
+ void mangleTemplateArgument(const TemplateArgument &A);
+ };
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool CXXNameMangler::mangleFunctionDecl(const FunctionDecl *FD) {
+ // Clang's "overloadable" attribute extension to C/C++ implies
+ // name mangling (always).
+ if (FD->hasAttr<OverloadableAttr>()) {
+ ; // fall into mangling code unconditionally.
+ } else if (// C functions are not mangled
+ !Context.getLangOptions().CPlusPlus ||
+ // "main" is not mangled in C++
+ FD->isMain() ||
+ // No mangling in an "implicit extern C" header.
+ (FD->getLocation().isValid() &&
+ Context.getSourceManager().getFileCharacteristic(FD->getLocation()))
+ == SrcMgr::C_ExternCSystem ||
+ // No name mangling in a C linkage specification.
+ isInCLinkageSpecification(FD))
+ return false;
+
+ // If we get here, mangle the decl name!
+ Out << "_Z";
+ mangleFunctionEncoding(FD);
+ return true;
+}
+
+bool CXXNameMangler::mangle(const NamedDecl *D) {
+ // Any decl can be declared with __asm("foo") on it, and this takes
+ // precedence over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+ Out << ALA->getLabel();
+ return true;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+
+ // FIXME: Actually use a visitor to decode these?
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return mangleFunctionDecl(FD);
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!Context.getLangOptions().CPlusPlus ||
+ isInCLinkageSpecification(D) ||
+ D->getDeclContext()->isTranslationUnit())
+ return false;
+
+ Out << "_Z";
+ mangleName(VD);
+ return true;
+ }
+
+ return false;
+}
+
+void CXXNameMangler::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type) {
+ assert(!Structor && "Structor already set!");
+ Structor = D;
+ StructorType = Type;
+
+ mangle(D);
+}
+
+void CXXNameMangler::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type) {
+ assert(!Structor && "Structor already set!");
+ Structor = D;
+ StructorType = Type;
+
+ mangle(D);
+}
+
+void CXXNameMangler::mangleGuardVariable(const VarDecl *D)
+{
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+
+ Out << "_ZGV";
+ mangleName(D);
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+ mangleBareFunctionType(FD->getType()->getAsFunctionType(), false);
+}
+
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace() || !DC->getParent()->isTranslationUnit())
+ return false;
+
+ const NamespaceDecl *NS = cast<NamespaceDecl>(DC);
+ return NS->getOriginalNamespace()->getIdentifier()->isStr("std");
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name> # See Scope Encoding below
+ //
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
+ if (ND->getDeclContext()->isTranslationUnit())
+ mangleUnqualifiedName(ND);
+ else if (isStdNamespace(ND->getDeclContext())) {
+ Out << "St";
+ mangleUnqualifiedName(ND);
+ } else if (isa<FunctionDecl>(ND->getDeclContext()))
+ mangleLocalName(ND);
+ else
+ mangleNestedName(ND);
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ DeclarationName Name = ND->getDeclName();
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ mangleSourceName(Name.getAsIdentifierInfo());
+ break;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ assert(false && "Can't mangle Objective-C selector names here!");
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the
+ // type we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the
+ // type we were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Context.getCanonicalType(Name.getCXXNameType()));
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator(),
+ cast<FunctionDecl>(ND)->getNumParams());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ assert(false && "Can't mangle a using directive name!");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
+ Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND) {
+ // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+ // FIXME: no template support
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+ mangleCVQualifiers(Method->getTypeQualifiers());
+ manglePrefix(ND->getDeclContext());
+ mangleUnqualifiedName(ND);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <discriminator> := _ <non-negative number>
+ Out << 'Z';
+ mangleFunctionEncoding(cast<FunctionDecl>(ND->getDeclContext()));
+ Out << 'E';
+ mangleSourceName(ND->getIdentifier());
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+ // FIXME: We only handle mangling of namespaces and classes at the moment.
+ if (!DC->getParent()->isTranslationUnit())
+ manglePrefix(DC->getParent());
+
+ if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC))
+ mangleSourceName(Namespace->getIdentifier());
+ else if (const RecordDecl *Record = dyn_cast<RecordDecl>(DC)) {
+ if (const ClassTemplateSpecializationDecl *D =
+ dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ mangleType(QualType(D->getTypeForDecl(), 0));
+ } else
+ mangleSourceName(Record->getIdentifier());
+ }
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # +
+ case OO_Plus: Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # -
+ case OO_Minus: Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # &
+ case OO_Amp: Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # *
+ case OO_Star: Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+ // UNSUPPORTED: ::= qu # ?
+
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Not an overloaded operator");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCVQualifiers(unsigned Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
+ if (Quals & QualType::Restrict)
+ Out << 'r';
+ if (Quals & QualType::Volatile)
+ Out << 'V';
+ if (Quals & QualType::Const)
+ Out << 'K';
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = Context.getCanonicalType(T);
+
+ // FIXME: Should we have a TypeNodes.def to make this easier? (YES!)
+
+ // <type> ::= <CV-qualifiers> <type>
+ mangleCVQualifiers(T.getCVRQualifiers());
+
+ // ::= <builtin-type>
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ mangleType(BT);
+ // ::= <function-type>
+ else if (const FunctionType *FT = dyn_cast<FunctionType>(T.getTypePtr()))
+ mangleType(FT);
+ // ::= <class-enum-type>
+ else if (const TagType *TT = dyn_cast<TagType>(T.getTypePtr()))
+ mangleType(TT);
+ // ::= <array-type>
+ else if (const ArrayType *AT = dyn_cast<ArrayType>(T.getTypePtr()))
+ mangleType(AT);
+ // ::= <pointer-to-member-type>
+ else if (const MemberPointerType *MPT
+ = dyn_cast<MemberPointerType>(T.getTypePtr()))
+ mangleType(MPT);
+ // ::= <template-param>
+ else if (const TemplateTypeParmType *TypeParm
+ = dyn_cast<TemplateTypeParmType>(T.getTypePtr()))
+ mangleType(TypeParm);
+ // FIXME: ::= <template-template-param> <template-args>
+ // FIXME: ::= <substitution> # See Compression below
+ // ::= P <type> # pointer-to
+ else if (const PointerType *PT = dyn_cast<PointerType>(T.getTypePtr())) {
+ Out << 'P';
+ mangleType(PT->getPointeeType());
+ }
+ // ::= R <type> # reference-to
+ else if (const LValueReferenceType *RT =
+ dyn_cast<LValueReferenceType>(T.getTypePtr())) {
+ Out << 'R';
+ mangleType(RT->getPointeeType());
+ }
+ // ::= O <type> # rvalue reference-to (C++0x)
+ else if (const RValueReferenceType *RT =
+ dyn_cast<RValueReferenceType>(T.getTypePtr())) {
+ Out << 'O';
+ mangleType(RT->getPointeeType());
+ }
+ // ::= C <type> # complex pair (C 2000)
+ else if (const ComplexType *CT = dyn_cast<ComplexType>(T.getTypePtr())) {
+ Out << 'C';
+ mangleType(CT->getElementType());
+ } else if (const VectorType *VT = dyn_cast<VectorType>(T.getTypePtr())) {
+ // GNU extension: vector types
+ Out << "U8__vector";
+ mangleType(VT->getElementType());
+ } else if (const ObjCInterfaceType *IT =
+ dyn_cast<ObjCInterfaceType>(T.getTypePtr())) {
+ mangleType(IT);
+ }
+ // FIXME: ::= G <type> # imaginary (C 2000)
+ // FIXME: ::= U <source-name> <type> # vendor extended type qualifier
+ else
+ assert(false && "Cannot mangle unknown type");
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // UNSUPPORTED: ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // UNSUPPORTED: ::= Di # char32_t
+ // UNSUPPORTED: ::= Ds # char16_t
+ // ::= u <source-name> # vendor extended type
+ // From our point of view, std::nullptr_t is a builtin, but as far as mangling
+ // is concerned, it's a type called std::nullptr_t.
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar: Out << 'w'; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ assert(false &&
+ "Overloaded and dependent types shouldn't get to name mangling");
+ break;
+ }
+}
+
+void CXXNameMangler::mangleType(const FunctionType *T) {
+ // <function-type> ::= F [Y] <bare-function-type> E
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType)
+ mangleType(T->getResultType());
+
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(T);
+ assert(Proto && "Can't mangle K&R function prototypes");
+
+ if (Proto->getNumArgs() == 0) {
+ Out << 'v';
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
+
+void CXXNameMangler::mangleType(const TagType *T) {
+ // <class-enum-type> ::= <name>
+
+ if (!T->getDecl()->getIdentifier())
+ mangleName(T->getDecl()->getTypedefForAnonDecl());
+ else
+ mangleName(T->getDecl());
+
+ // If this is a class template specialization, mangle the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
+ mangleTemplateArgumentList(Spec->getTemplateArgs());
+}
+
+void CXXNameMangler::mangleType(const ArrayType *T) {
+ // <array-type> ::= A <positive dimension number> _ <element type>
+ // ::= A [<dimension expression>] _ <element type>
+ Out << 'A';
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T))
+ Out << CAT->getSize();
+ else if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(T))
+ mangleExpression(VAT->getSizeExpr());
+ else if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(T))
+ mangleExpression(DSAT->getSizeExpr());
+
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ // <pointer-to-member-type> ::= M <class type> <member type>
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleCVQualifiers(FPT->getTypeQuals());
+ mangleType(FPT);
+ } else
+ mangleType(PointeeType);
+}
+
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (T->getIndex() == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (T->getIndex() - 1) << '_';
+}
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleExpression(Expr *E) {
+ assert(false && "Cannot mangle expressions yet");
+}
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleTemplateArgumentList(const TemplateArgumentList &L) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << "I";
+
+ for (unsigned i = 0, e = L.size(); i != e; ++i) {
+ const TemplateArgument &A = L[i];
+
+ mangleTemplateArgument(A);
+ }
+
+ Out << "E";
+}
+
+void CXXNameMangler::mangleTemplateArgument(const TemplateArgument &A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= I <template-arg>* E # argument pack
+ // ::= sp <expression> # pack expansion of (C++0x)
+ switch (A.getKind()) {
+ default:
+ assert(0 && "Unknown template argument kind!");
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Integral:
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+
+ Out << 'L';
+
+ mangleType(A.getIntegralType());
+
+ const llvm::APSInt *Integral = A.getAsIntegral();
+ if (A.getIntegralType()->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Integral->getBoolValue() ? '1' : '0');
+ } else {
+ if (Integral->isNegative())
+ Out << 'n';
+ Integral->abs().print(Out, false);
+ }
+
+ Out << 'E';
+ break;
+ }
+}
+
+namespace clang {
+ /// \brief Mangles the name of the declaration D and emits that name
+ /// to the given output stream.
+ ///
+ /// If the declaration D requires a mangled name, this routine will
+ /// emit that mangled name to \p os and return true. Otherwise, \p
+ /// os will be unchanged and this routine will return false. In this
+ /// case, the caller should just emit the identifier of the declaration
+ /// (\c D->getIdentifier()) as its name.
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os) {
+ assert(!isa<CXXConstructorDecl>(D) &&
+ "Use mangleCXXCtor for constructor decls!");
+ assert(!isa<CXXDestructorDecl>(D) &&
+ "Use mangleCXXDtor for destructor decls!");
+
+ CXXNameMangler Mangler(Context, os);
+ if (!Mangler.mangle(D))
+ return false;
+
+ os.flush();
+ return true;
+ }
+
+ /// mangleGuardVariable - Returns the mangled name for a guard variable
+ /// for the passed in VarDecl.
+ void mangleGuardVariable(const VarDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleGuardVariable(D);
+
+ os.flush();
+ }
+
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleCXXCtor(D, Type);
+
+ os.flush();
+ }
+
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os) {
+ CXXNameMangler Mangler(Context, os);
+ Mangler.mangleCXXDtor(D, Type);
+
+ os.flush();
+ }
+
+
+}
+
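A few worked examples of the Itanium scheme implemented above; the declarations are hypothetical, and the mangled strings follow the grammar comments in the code and the Itanium C++ ABI document:

  // void f(int);            global scope
  //   _Z <source-name "1f"> <bare-function-type "i">   =>  _Z1fi
  // void ns::f(int);        nested in namespace ns
  //   _Z N 2ns 1f E i                                  =>  _ZN2ns1fEi
  // int Foo::get() const;   const member function, no parameters
  //   _Z N K 3Foo 3get E v                             =>  _ZNK3Foo3getEv
  //   (K is the const qualifier, v encodes the empty parameter list, and the
  //    return types of non-template functions are not mangled)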
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
new file mode 100644
index 0000000..77cbd97
--- /dev/null
+++ b/lib/CodeGen/Mangle.h
@@ -0,0 +1,44 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
+#define LLVM_CLANG_CODEGEN_MANGLE_H
+
+#include "CGCXX.h"
+
+namespace llvm {
+ class raw_ostream;
+}
+
+namespace clang {
+ class ASTContext;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class NamedDecl;
+ class VarDecl;
+
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os);
+ void mangleGuardVariable(const VarDecl *D, ASTContext &Context,
+ llvm::raw_ostream &os);
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ ASTContext &Context, llvm::raw_ostream &os);
+}
+
+#endif
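A caller-side sketch of the interface above, following the contract documented in Mangle.cpp: if mangleName() returns false, the caller should fall back to the plain identifier. ND, Ctx, and UseName are hypothetical, and the sketch additionally needs llvm/Support/raw_ostream.h:

  std::string Buffer;
  llvm::raw_string_ostream OS(Buffer);
  if (clang::mangleName(ND, Ctx, OS))
    UseName(OS.str());                        // mangled, e.g. "_Z1fi"
  else
    UseName(ND->getIdentifier()->getName());  // plain C-style name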
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..9b85df6
--- /dev/null
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,100 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+
+namespace {
+ class VISIBILITY_HIDDEN CodeGeneratorImpl : public CodeGenerator {
+ Diagnostic &Diags;
+ llvm::OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CompileOptions CompileOpts; // Intentionally copied in.
+ protected:
+ llvm::OwningPtr<llvm::Module> M;
+ llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+ const CompileOptions &CO)
+ : Diags(diags), CompileOpts(CO), M(new llvm::Module(ModuleName)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->Target.getTargetTriple());
+ M->setDataLayout(Ctx->Target.getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CompileOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ }
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// (e.g. struct, union, enum, class) is completed. This allows the
+ /// client to hack on the type, which can occur at any point in the file
+ /// (because these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ };
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+ };
+}
+
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+ const std::string& ModuleName,
+ const CompileOptions &CO) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CO);
+}
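A rough sketch of how a front end drives the consumer created above. Diags, Opts, Context, and DG are hypothetical objects the caller already owns, and in practice the ASTConsumer callbacks are invoked by the parser rather than by hand:

  llvm::OwningPtr<clang::CodeGenerator> Gen(
      clang::CreateLLVMCodeGen(Diags, "my_module", Opts));
  Gen->Initialize(Context);             // sets up TargetData and CodeGenModule
  Gen->HandleTopLevelDecl(DG);          // once per top-level DeclGroup
  Gen->HandleTranslationUnit(Context);  // finalizes, or drops the module on errors
  llvm::Module *M = Gen->ReleaseModule();  // may be null if errors occurred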
diff --git a/lib/CodeGen/README.txt b/lib/CodeGen/README.txt
new file mode 100644
index 0000000..f60cd03
--- /dev/null
+++ b/lib/CodeGen/README.txt
@@ -0,0 +1,65 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x, which can easily be avoided.
+
+//===---------------------------------------------------------------------===//
+
+Bitfield accesses can be shifted to simplify masking and sign
+extension. For example, if the bitfield width is 8 and it is
+appropriately aligned, then it is a lot shorter to just load the char
+directly.
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding creation of alloca's for formal arguments
+for the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and if its address is taken or it is stored to
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about here is -O0 -g compile-time
+performance, and in that scenario we currently need to emit the alloca
+anyway to emit proper debug info. So this is blocked by
+being able to emit debug information which refers to an LLVM
+temporary, not an alloca.
+
+//===---------------------------------------------------------------------===//
+
+We should try and avoid generating basic blocks which only contain
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead), all the way down through code generation and
+assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
+
+There are some more places where we could avoid generating unreachable code. For
+example:
+ void f0(int a) { abort(); if (a) printf("hi"); }
+still generates a call to printf. This doesn't occur much in real
+code, but would still be nice to clean up.
+
+//===---------------------------------------------------------------------===//
+
+Deferred generation of statics incurs some additional
+overhead. Currently it is even possible to construct test cases with
+O(N^2) behavior! For at least simple cases where we can tell a global
+is used, it is probably not worth deferring it. This doesn't solve the
+O(N^2) cases, though...
+
+PR3810
+
+//===---------------------------------------------------------------------===//
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
new file mode 100644
index 0000000..cabc33e
--- /dev/null
+++ b/lib/Driver/Action.cpp
@@ -0,0 +1,79 @@
+//===--- Action.cpp - Abstract compilation steps ------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Action.h"
+
+#include <cassert>
+using namespace clang::driver;
+
+Action::~Action() {
+ // FIXME: Free the inputs. The problem is that BindArchAction shares
+ // inputs, so we can't just walk the inputs.
+}
+
+const char *Action::getClassName(ActionClass AC) {
+ switch (AC) {
+ case InputClass: return "input";
+ case BindArchClass: return "bind-arch";
+ case PreprocessJobClass: return "preprocessor";
+ case PrecompileJobClass: return "precompiler";
+ case AnalyzeJobClass: return "analyzer";
+ case CompileJobClass: return "compiler";
+ case AssembleJobClass: return "assembler";
+ case LinkJobClass: return "linker";
+ case LipoJobClass: return "lipo";
+ }
+
+ assert(0 && "invalid class");
+ return 0;
+}
+
+InputAction::InputAction(const Arg &_Input, types::ID _Type)
+ : Action(InputClass, _Type), Input(_Input) {
+}
+
+BindArchAction::BindArchAction(Action *Input, const char *_ArchName)
+ : Action(BindArchClass, Input, Input->getType()), ArchName(_ArchName) {
+}
+
+JobAction::JobAction(ActionClass Kind, Action *Input, types::ID Type)
+ : Action(Kind, Input, Type) {
+}
+
+JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
+ : Action(Kind, Inputs, Type) {
+}
+
+PreprocessJobAction::PreprocessJobAction(Action *Input, types::ID OutputType)
+ : JobAction(PreprocessJobClass, Input, OutputType) {
+}
+
+PrecompileJobAction::PrecompileJobAction(Action *Input, types::ID OutputType)
+ : JobAction(PrecompileJobClass, Input, OutputType) {
+}
+
+AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
+ : JobAction(AnalyzeJobClass, Input, OutputType) {
+}
+
+CompileJobAction::CompileJobAction(Action *Input, types::ID OutputType)
+ : JobAction(CompileJobClass, Input, OutputType) {
+}
+
+AssembleJobAction::AssembleJobAction(Action *Input, types::ID OutputType)
+ : JobAction(AssembleJobClass, Input, OutputType) {
+}
+
+LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LinkJobClass, Inputs, Type) {
+}
+
+LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LipoJobClass, Inputs, Type) {
+}
diff --git a/lib/Driver/Arg.cpp b/lib/Driver/Arg.cpp
new file mode 100644
index 0000000..e227d7e
--- /dev/null
+++ b/lib/Driver/Arg.cpp
@@ -0,0 +1,192 @@
+//===--- Arg.cpp - Argument Implementations -----------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Option.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+
+Arg::Arg(ArgClass _Kind, const Option *_Opt, unsigned _Index,
+ const Arg *_BaseArg)
+ : Kind(_Kind), Opt(_Opt), BaseArg(_BaseArg), Index(_Index), Claimed(false)
+{
+}
+
+Arg::~Arg() { }
+
+void Arg::dump() const {
+ llvm::errs() << "<";
+ switch (Kind) {
+ default:
+ assert(0 && "Invalid kind");
+#define P(N) case N: llvm::errs() << #N; break
+ P(FlagClass);
+ P(PositionalClass);
+ P(JoinedClass);
+ P(SeparateClass);
+ P(CommaJoinedClass);
+ P(JoinedAndSeparateClass);
+#undef P
+ }
+
+ llvm::errs() << " Opt:";
+ Opt->dump();
+
+ llvm::errs() << " Index:" << Index;
+
+ if (isa<CommaJoinedArg>(this) || isa<SeparateArg>(this))
+ llvm::errs() << " NumValues:" << getNumValues();
+
+ llvm::errs() << ">\n";
+}
+
+std::string Arg::getAsString(const ArgList &Args) const {
+ std::string Res;
+ llvm::raw_string_ostream OS(Res);
+
+ ArgStringList ASL;
+ render(Args, ASL);
+ for (ArgStringList::iterator
+ it = ASL.begin(), ie = ASL.end(); it != ie; ++it) {
+ if (it != ASL.begin())
+ OS << ' ';
+ OS << *it;
+ }
+
+ return OS.str();
+}
+
+void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const {
+ if (!getOption().hasNoOptAsInput()) {
+ render(Args, Output);
+ return;
+ }
+
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+}
+
+FlagArg::FlagArg(const Option *Opt, unsigned Index, const Arg *BaseArg)
+ : Arg(FlagClass, Opt, Index, BaseArg) {
+}
+
+void FlagArg::render(const ArgList &Args, ArgStringList &Output) const {
+ Output.push_back(Args.getArgString(getIndex()));
+}
+
+const char *FlagArg::getValue(const ArgList &Args, unsigned N) const {
+ assert(0 && "Invalid index.");
+ return 0;
+}
+
+PositionalArg::PositionalArg(const Option *Opt, unsigned Index,
+ const Arg *BaseArg)
+ : Arg(PositionalClass, Opt, Index, BaseArg) {
+}
+
+void PositionalArg::render(const ArgList &Args, ArgStringList &Output) const {
+ Output.push_back(Args.getArgString(getIndex()));
+}
+
+const char *PositionalArg::getValue(const ArgList &Args, unsigned N) const {
+ assert(N < getNumValues() && "Invalid index.");
+ return Args.getArgString(getIndex());
+}
+
+JoinedArg::JoinedArg(const Option *Opt, unsigned Index, const Arg *BaseArg)
+ : Arg(JoinedClass, Opt, Index, BaseArg) {
+}
+
+void JoinedArg::render(const ArgList &Args, ArgStringList &Output) const {
+ if (getOption().hasForceSeparateRender()) {
+ Output.push_back(getOption().getName());
+ Output.push_back(getValue(Args, 0));
+ } else {
+ Output.push_back(Args.getArgString(getIndex()));
+ }
+}
+
+const char *JoinedArg::getValue(const ArgList &Args, unsigned N) const {
+ assert(N < getNumValues() && "Invalid index.");
+ // FIXME: Avoid strlen.
+ return Args.getArgString(getIndex()) + strlen(getOption().getName());
+}
+
+CommaJoinedArg::CommaJoinedArg(const Option *Opt, unsigned Index,
+ const char *Str, const Arg *BaseArg)
+ : Arg(CommaJoinedClass, Opt, Index, BaseArg) {
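+  // Split Str at commas; each non-empty piece becomes one value.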
+ const char *Prev = Str;
+ for (;; ++Str) {
+ char c = *Str;
+
+ if (!c) {
+ if (Prev != Str)
+ Values.push_back(std::string(Prev, Str));
+ break;
+ } else if (c == ',') {
+ if (Prev != Str)
+ Values.push_back(std::string(Prev, Str));
+ Prev = Str + 1;
+ }
+ }
+}
+
+void CommaJoinedArg::render(const ArgList &Args, ArgStringList &Output) const {
+ Output.push_back(Args.getArgString(getIndex()));
+}
+
+const char *CommaJoinedArg::getValue(const ArgList &Args, unsigned N) const {
+ assert(N < getNumValues() && "Invalid index.");
+ return Values[N].c_str();
+}
+
+SeparateArg::SeparateArg(const Option *Opt, unsigned Index, unsigned _NumValues,
+ const Arg *BaseArg)
+ : Arg(SeparateClass, Opt, Index, BaseArg), NumValues(_NumValues) {
+}
+
+void SeparateArg::render(const ArgList &Args, ArgStringList &Output) const {
+ if (getOption().hasForceJoinedRender()) {
+ assert(getNumValues() == 1 && "Cannot force joined render with > 1 args.");
+ // FIXME: Avoid std::string.
+ std::string Joined(getOption().getName());
+ Joined += Args.getArgString(getIndex());
+ Output.push_back(Args.MakeArgString(Joined.c_str()));
+ } else {
+ Output.push_back(Args.getArgString(getIndex()));
+ for (unsigned i = 0; i < NumValues; ++i)
+ Output.push_back(Args.getArgString(getIndex() + 1 + i));
+ }
+}
+
+const char *SeparateArg::getValue(const ArgList &Args, unsigned N) const {
+ assert(N < getNumValues() && "Invalid index.");
+ return Args.getArgString(getIndex() + 1 + N);
+}
+
+JoinedAndSeparateArg::JoinedAndSeparateArg(const Option *Opt, unsigned Index,
+ const Arg *BaseArg)
+ : Arg(JoinedAndSeparateClass, Opt, Index, BaseArg) {
+}
+
+void JoinedAndSeparateArg::render(const ArgList &Args,
+ ArgStringList &Output) const {
+ Output.push_back(Args.getArgString(getIndex()));
+ Output.push_back(Args.getArgString(getIndex() + 1));
+}
+
+const char *JoinedAndSeparateArg::getValue(const ArgList &Args,
+ unsigned N) const {
+ assert(N < getNumValues() && "Invalid index.");
+ if (N == 0)
+ return Args.getArgString(getIndex()) + strlen(getOption().getName());
+ return Args.getArgString(getIndex() + 1);
+}
diff --git a/lib/Driver/ArgList.cpp b/lib/Driver/ArgList.cpp
new file mode 100644
index 0000000..593694c
--- /dev/null
+++ b/lib/Driver/ArgList.cpp
@@ -0,0 +1,232 @@
+//===--- ArgList.cpp - Argument List Management -------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/Option.h"
+
+using namespace clang::driver;
+
+ArgList::ArgList(arglist_type &_Args) : Args(_Args) {
+}
+
+ArgList::~ArgList() {
+}
+
+void ArgList::append(Arg *A) {
+ Args.push_back(A);
+}
+
+Arg *ArgList::getLastArg(options::ID Id, bool Claim) const {
+ // FIXME: Make search efficient?
+ for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id)) {
+ if (Claim) (*it)->claim();
+ return *it;
+ }
+ }
+
+ return 0;
+}
+
+Arg *ArgList::getLastArg(options::ID Id0, options::ID Id1, bool Claim) const {
+ Arg *Res, *A0 = getLastArg(Id0, false), *A1 = getLastArg(Id1, false);
+
+ if (A0 && A1)
+ Res = A0->getIndex() > A1->getIndex() ? A0 : A1;
+ else
+ Res = A0 ? A0 : A1;
+
+ if (Claim && Res)
+ Res->claim();
+
+ return Res;
+}
+
+bool ArgList::hasFlag(options::ID Pos, options::ID Neg, bool Default) const {
+ if (Arg *A = getLastArg(Pos, Neg))
+ return A->getOption().matches(Pos);
+ return Default;
+}
+
+void ArgList::AddLastArg(ArgStringList &Output, options::ID Id) const {
+ if (Arg *A = getLastArg(Id)) {
+ A->claim();
+ A->render(*this, Output);
+ }
+}
+
+void ArgList::AddAllArgs(ArgStringList &Output, options::ID Id0) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0)) {
+ A->claim();
+ A->render(*this, Output);
+ }
+ }
+}
+
+void ArgList::AddAllArgs(ArgStringList &Output, options::ID Id0,
+ options::ID Id1) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0) || A->getOption().matches(Id1)) {
+ A->claim();
+ A->render(*this, Output);
+ }
+ }
+}
+
+void ArgList::AddAllArgs(ArgStringList &Output, options::ID Id0,
+ options::ID Id1, options::ID Id2) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0) || A->getOption().matches(Id1) ||
+ A->getOption().matches(Id2)) {
+ A->claim();
+ A->render(*this, Output);
+ }
+ }
+}
+
+void ArgList::AddAllArgValues(ArgStringList &Output, options::ID Id0) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0)) {
+ A->claim();
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ Output.push_back(A->getValue(*this, i));
+ }
+ }
+}
+
+void ArgList::AddAllArgValues(ArgStringList &Output, options::ID Id0,
+ options::ID Id1) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0) || A->getOption().matches(Id1)) {
+ A->claim();
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ Output.push_back(A->getValue(*this, i));
+ }
+ }
+}
+
+void ArgList::AddAllArgsTranslated(ArgStringList &Output, options::ID Id0,
+ const char *Translation,
+ bool Joined) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0)) {
+ A->claim();
+
+ if (Joined) {
+ std::string Value = Translation;
+ Value += A->getValue(*this, 0);
+ Output.push_back(MakeArgString(Value.c_str()));
+ } else {
+ Output.push_back(Translation);
+ Output.push_back(A->getValue(*this, 0));
+ }
+ }
+ }
+}
+
+void ArgList::ClaimAllArgs(options::ID Id0) const {
+ // FIXME: Make fast.
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(Id0))
+ A->claim();
+ }
+}
+
+//
+
+InputArgList::InputArgList(const char **ArgBegin, const char **ArgEnd)
+ : ArgList(ActualArgs), NumInputArgStrings(ArgEnd - ArgBegin)
+{
+ ArgStrings.append(ArgBegin, ArgEnd);
+}
+
+InputArgList::~InputArgList() {
+ // An InputArgList always owns its arguments.
+ for (iterator it = begin(), ie = end(); it != ie; ++it)
+ delete *it;
+}
+
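+/// MakeIndex - Add a synthesized argument string to the list and return
+/// the index assigned to it. The string is stashed in SynthesizedStrings
+/// so the pointer recorded in ArgStrings remains valid.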
+unsigned InputArgList::MakeIndex(const char *String0) const {
+ unsigned Index = ArgStrings.size();
+
+ // Tuck away so we have a reliable const char *.
+ SynthesizedStrings.push_back(String0);
+ ArgStrings.push_back(SynthesizedStrings.back().c_str());
+
+ return Index;
+}
+
+unsigned InputArgList::MakeIndex(const char *String0,
+ const char *String1) const {
+ unsigned Index0 = MakeIndex(String0);
+ unsigned Index1 = MakeIndex(String1);
+ assert(Index0 + 1 == Index1 && "Unexpected non-consecutive indices!");
+ (void) Index1;
+ return Index0;
+}
+
+const char *InputArgList::MakeArgString(const char *Str) const {
+ return getArgString(MakeIndex(Str));
+}
+
+//
+
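+/// DerivedArgList - When OnlyProxy is set this list is just a view of the
+/// base arguments; otherwise it maintains its own argument vector, plus
+/// ownership of any arguments it synthesizes.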
+DerivedArgList::DerivedArgList(InputArgList &_BaseArgs, bool _OnlyProxy)
+ : ArgList(_OnlyProxy ? _BaseArgs.getArgs() : ActualArgs),
+ BaseArgs(_BaseArgs), OnlyProxy(_OnlyProxy)
+{
+}
+
+DerivedArgList::~DerivedArgList() {
+ // We only own the arguments we explicitly synthesized.
+ for (iterator it = SynthesizedArgs.begin(), ie = SynthesizedArgs.end();
+ it != ie; ++it)
+ delete *it;
+}
+
+const char *DerivedArgList::MakeArgString(const char *Str) const {
+ return BaseArgs.MakeArgString(Str);
+}
+
+Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option *Opt) const {
+ return new FlagArg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg);
+}
+
+Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option *Opt,
+ const char *Value) const {
+ return new PositionalArg(Opt, BaseArgs.MakeIndex(Value), BaseArg);
+}
+
+Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
+ const char *Value) const {
+ return new SeparateArg(Opt, BaseArgs.MakeIndex(Opt->getName(), Value), 1,
+ BaseArg);
+}
+
+Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
+ const char *Value) const {
+ std::string Joined(Opt->getName());
+ Joined += Value;
+ return new JoinedArg(Opt, BaseArgs.MakeIndex(Joined.c_str()), BaseArg);
+}
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
new file mode 100644
index 0000000..7147d8f
--- /dev/null
+++ b/lib/Driver/CMakeLists.txt
@@ -0,0 +1,19 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangDriver
+ Action.cpp
+ Arg.cpp
+ ArgList.cpp
+ Compilation.cpp
+ Driver.cpp
+ HostInfo.cpp
+ Job.cpp
+ OptTable.cpp
+ Option.cpp
+ Phases.cpp
+ Tool.cpp
+ ToolChain.cpp
+ ToolChains.cpp
+ Tools.cpp
+ Types.cpp
+ )
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
new file mode 100644
index 0000000..1e044c6b
--- /dev/null
+++ b/lib/Driver/Compilation.cpp
@@ -0,0 +1,174 @@
+//===--- Compilation.cpp - Compilation Task Implementation --------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Compilation.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/ToolChain.h"
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Program.h"
+#include <sys/stat.h>
+#include <errno.h>
+using namespace clang::driver;
+
+Compilation::Compilation(Driver &D,
+ ToolChain &_DefaultToolChain,
+ InputArgList *_Args)
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args) {
+}
+
+Compilation::~Compilation() {
+ delete Args;
+
+ // Free any derived arg lists.
+ for (llvm::DenseMap<const ToolChain*, DerivedArgList*>::iterator
+ it = TCArgs.begin(), ie = TCArgs.end(); it != ie; ++it)
+ delete it->second;
+
+ // Free the actions, if built.
+ for (ActionList::iterator it = Actions.begin(), ie = Actions.end();
+ it != ie; ++it)
+ delete *it;
+}
+
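+/// getArgsForToolChain - Return the argument list translated for the
+/// given tool chain, constructing and caching it on first use; a null
+/// tool chain selects the default tool chain.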
+const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC) {
+ if (!TC)
+ TC = &DefaultToolChain;
+
+ DerivedArgList *&Entry = TCArgs[TC];
+ if (!Entry)
+ Entry = TC->TranslateArgs(*Args);
+
+ return *Entry;
+}
+
+void Compilation::PrintJob(llvm::raw_ostream &OS, const Job &J,
+ const char *Terminator, bool Quote) const {
+ if (const Command *C = dyn_cast<Command>(&J)) {
+ OS << " \"" << C->getExecutable() << '"';
+ for (ArgStringList::const_iterator it = C->getArguments().begin(),
+ ie = C->getArguments().end(); it != ie; ++it) {
+ if (Quote)
+ OS << " \"" << *it << '"';
+ else
+ OS << ' ' << *it;
+ }
+ OS << Terminator;
+ } else if (const PipedJob *PJ = dyn_cast<PipedJob>(&J)) {
+ for (PipedJob::const_iterator
+ it = PJ->begin(), ie = PJ->end(); it != ie; ++it)
+ PrintJob(OS, **it, (it + 1 != PJ->end()) ? " |\n" : "\n", Quote);
+ } else {
+ const JobList *Jobs = cast<JobList>(&J);
+ for (JobList::const_iterator
+ it = Jobs->begin(), ie = Jobs->end(); it != ie; ++it)
+ PrintJob(OS, **it, Terminator, Quote);
+ }
+}
+
+bool Compilation::CleanupFileList(const ArgStringList &Files,
+ bool IssueErrors) const {
+ bool Success = true;
+
+ for (ArgStringList::const_iterator
+ it = Files.begin(), ie = Files.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
+ std::string Error;
+
+ if (P.eraseFromDisk(false, &Error)) {
+      // Failure only matters if the file still exists, i.e. the removal
+      // didn't simply report that the file was already gone. There is a
+      // race condition here due to the limited interface of
+      // llvm::sys::Path; we want to know whether the removal gave ENOENT.
+
+ // FIXME: Grumble, P.exists() is broken. PR3837.
+ struct stat buf;
+ if (::stat(P.c_str(), &buf) == 0
+ || errno != ENOENT) {
+ if (IssueErrors)
+ getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
+ << Error;
+ Success = false;
+ }
+ }
+ }
+
+ return Success;
+}
+
+int Compilation::ExecuteCommand(const Command &C) const {
+ llvm::sys::Path Prog(C.getExecutable());
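+  // Build a null-terminated argv array: the executable name first, then
+  // the command arguments, then a trailing null.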
+ const char **Argv = new const char*[C.getArguments().size() + 2];
+ Argv[0] = C.getExecutable();
+ std::copy(C.getArguments().begin(), C.getArguments().end(), Argv+1);
+ Argv[C.getArguments().size() + 1] = 0;
+
+ if (getDriver().CCCEcho || getArgs().hasArg(options::OPT_v))
+ PrintJob(llvm::errs(), C, "\n", false);
+
+ std::string Error;
+ int Res =
+ llvm::sys::Program::ExecuteAndWait(Prog, Argv,
+ /*env*/0, /*redirects*/0,
+ /*secondsToWait*/0, /*memoryLimit*/0,
+ &Error);
+ if (!Error.empty()) {
+ assert(Res && "Error string set with 0 result code!");
+ getDriver().Diag(clang::diag::err_drv_command_failure) << Error;
+ }
+
+ delete[] Argv;
+ return Res;
+}
+
+int Compilation::ExecuteJob(const Job &J) const {
+ if (const Command *C = dyn_cast<Command>(&J)) {
+ return ExecuteCommand(*C);
+ } else if (const PipedJob *PJ = dyn_cast<PipedJob>(&J)) {
+ // Piped commands with a single job are easy.
+ if (PJ->size() == 1)
+ return ExecuteCommand(**PJ->begin());
+
+ getDriver().Diag(clang::diag::err_drv_unsupported_opt) << "-pipe";
+ return 1;
+ } else {
+ const JobList *Jobs = cast<JobList>(&J);
+ for (JobList::const_iterator
+ it = Jobs->begin(), ie = Jobs->end(); it != ie; ++it)
+ if (int Res = ExecuteJob(**it))
+ return Res;
+ return 0;
+ }
+}
+
+int Compilation::Execute() const {
+ // Just print if -### was present.
+ if (getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ PrintJob(llvm::errs(), Jobs, "\n", true);
+ return 0;
+ }
+
+ // If there were errors building the compilation, quit now.
+ if (getDriver().getDiags().getNumErrors())
+ return 1;
+
+ int Res = ExecuteJob(Jobs);
+
+ // Remove temp files.
+ CleanupFileList(TempFiles);
+
+ // If the compilation failed, remove result files as well.
+ if (Res != 0 && !getArgs().hasArg(options::OPT_save_temps))
+ CleanupFileList(ResultFiles, true);
+
+ return Res;
+}
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
new file mode 100644
index 0000000..d8c6a0a
--- /dev/null
+++ b/lib/Driver/Driver.cpp
@@ -0,0 +1,1254 @@
+//===--- Driver.cpp - Clang GCC Compatible Driver -----------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Driver.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Types.h"
+
+#include "clang/Basic/Version.h"
+
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+#include "llvm/System/Program.h"
+
+#include "InputInfo.h"
+
+#include <map>
+
+using namespace clang::driver;
+using namespace clang;
+
+Driver::Driver(const char *_Name, const char *_Dir,
+ const char *_DefaultHostTriple,
+ const char *_DefaultImageName,
+ Diagnostic &_Diags)
+ : Opts(new OptTable()), Diags(_Diags),
+ Name(_Name), Dir(_Dir), DefaultHostTriple(_DefaultHostTriple),
+ DefaultImageName(_DefaultImageName),
+ Host(0),
+ CCCIsCXX(false), CCCEcho(false), CCCPrintBindings(false),
+ CCCGenericGCCName("gcc"), CCCUseClang(true), CCCUseClangCXX(false),
+ CCCUseClangCPP(true), CCCUsePCH(true),
+ SuppressMissingInputWarning(false)
+{
+ // Only use clang on i386 and x86_64 by default.
+ CCCClangArchs.insert("i386");
+ CCCClangArchs.insert("x86_64");
+}
+
+Driver::~Driver() {
+ delete Opts;
+ delete Host;
+}
+
+InputArgList *Driver::ParseArgStrings(const char **ArgBegin,
+ const char **ArgEnd) {
+ llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
+ InputArgList *Args = new InputArgList(ArgBegin, ArgEnd);
+
+ // FIXME: Handle '@' args (or at least error on them).
+
+ unsigned Index = 0, End = ArgEnd - ArgBegin;
+ while (Index < End) {
+ // gcc's handling of empty arguments doesn't make
+ // sense, but this is not a common use case. :)
+ //
+ // We just ignore them here (note that other things may
+ // still take them as arguments).
+ if (Args->getArgString(Index)[0] == '\0') {
+ ++Index;
+ continue;
+ }
+
+ unsigned Prev = Index;
+ Arg *A = getOpts().ParseOneArg(*Args, Index);
+ assert(Index > Prev && "Parser failed to consume argument.");
+
+ // Check for missing argument error.
+ if (!A) {
+ assert(Index >= End && "Unexpected parser error.");
+ Diag(clang::diag::err_drv_missing_argument)
+ << Args->getArgString(Prev)
+ << (Index - Prev - 1);
+ break;
+ }
+
+ if (A->getOption().isUnsupported()) {
+ Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(*Args);
+ continue;
+ }
+ Args->append(A);
+ }
+
+ return Args;
+}
+
+Compilation *Driver::BuildCompilation(int argc, const char **argv) {
+ llvm::PrettyStackTraceString CrashInfo("Compilation construction");
+
+  // FIXME: Handle environment options which affect driver behavior,
+ // somewhere (client?). GCC_EXEC_PREFIX, COMPILER_PATH,
+ // LIBRARY_PATH, LPATH, CC_PRINT_OPTIONS, QA_OVERRIDE_GCC3_OPTIONS.
+
+ // FIXME: What are we going to do with -V and -b?
+
+ // FIXME: This stuff needs to go into the Compilation, not the
+ // driver.
+ bool CCCPrintOptions = false, CCCPrintActions = false;
+
+ const char **Start = argv + 1, **End = argv + argc;
+ const char *HostTriple = DefaultHostTriple.c_str();
+
+ // Read -ccc args.
+ //
+ // FIXME: We need to figure out where this behavior should
+ // live. Most of it should be outside in the client; the parts that
+ // aren't should have proper options, either by introducing new ones
+ // or by overloading gcc ones like -V or -b.
+ for (; Start != End && memcmp(*Start, "-ccc-", 5) == 0; ++Start) {
+ const char *Opt = *Start + 5;
+
+ if (!strcmp(Opt, "print-options")) {
+ CCCPrintOptions = true;
+ } else if (!strcmp(Opt, "print-phases")) {
+ CCCPrintActions = true;
+ } else if (!strcmp(Opt, "print-bindings")) {
+ CCCPrintBindings = true;
+ } else if (!strcmp(Opt, "cxx")) {
+ CCCIsCXX = true;
+ } else if (!strcmp(Opt, "echo")) {
+ CCCEcho = true;
+
+ } else if (!strcmp(Opt, "gcc-name")) {
+ assert(Start+1 < End && "FIXME: -ccc- argument handling.");
+ CCCGenericGCCName = *++Start;
+
+ } else if (!strcmp(Opt, "clang-cxx")) {
+ CCCUseClangCXX = true;
+ } else if (!strcmp(Opt, "pch-is-pch")) {
+ CCCUsePCH = true;
+ } else if (!strcmp(Opt, "pch-is-pth")) {
+ CCCUsePCH = false;
+ } else if (!strcmp(Opt, "no-clang")) {
+ CCCUseClang = false;
+ } else if (!strcmp(Opt, "no-clang-cpp")) {
+ CCCUseClangCPP = false;
+ } else if (!strcmp(Opt, "clang-archs")) {
+ assert(Start+1 < End && "FIXME: -ccc- argument handling.");
+ const char *Cur = *++Start;
+
+ CCCClangArchs.clear();
+ for (;;) {
+ const char *Next = strchr(Cur, ',');
+
+ if (Next) {
+ if (Cur != Next)
+ CCCClangArchs.insert(std::string(Cur, Next));
+ Cur = Next + 1;
+ } else {
+ if (*Cur != '\0')
+ CCCClangArchs.insert(std::string(Cur));
+ break;
+ }
+ }
+
+ } else if (!strcmp(Opt, "host-triple")) {
+ assert(Start+1 < End && "FIXME: -ccc- argument handling.");
+ HostTriple = *++Start;
+
+ } else {
+ // FIXME: Error handling.
+ llvm::errs() << "invalid option: " << *Start << "\n";
+ exit(1);
+ }
+ }
+
+ InputArgList *Args = ParseArgStrings(Start, End);
+
+ Host = GetHostInfo(HostTriple);
+
+ // The compilation takes ownership of Args.
+ Compilation *C = new Compilation(*this, *Host->getToolChain(*Args), Args);
+
+ // FIXME: This behavior shouldn't be here.
+ if (CCCPrintOptions) {
+ PrintOptions(C->getArgs());
+ return C;
+ }
+
+ if (!HandleImmediateArgs(*C))
+ return C;
+
+ // Construct the list of abstract actions to perform for this
+ // compilation. We avoid passing a Compilation here simply to
+ // enforce the abstraction that pipelining is not host or toolchain
+ // dependent (other than the driver driver test).
+ if (Host->useDriverDriver())
+ BuildUniversalActions(C->getArgs(), C->getActions());
+ else
+ BuildActions(C->getArgs(), C->getActions());
+
+ if (CCCPrintActions) {
+ PrintActions(*C);
+ return C;
+ }
+
+ BuildJobs(*C);
+
+ return C;
+}
+
+void Driver::PrintOptions(const ArgList &Args) const {
+ unsigned i = 0;
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it, ++i) {
+ Arg *A = *it;
+ llvm::errs() << "Option " << i << " - "
+ << "Name: \"" << A->getOption().getName() << "\", "
+ << "Values: {";
+ for (unsigned j = 0; j < A->getNumValues(); ++j) {
+ if (j)
+ llvm::errs() << ", ";
+ llvm::errs() << '"' << A->getValue(Args, j) << '"';
+ }
+ llvm::errs() << "}\n";
+ }
+}
+
+static std::string getOptionHelpName(const OptTable &Opts, options::ID Id) {
+ std::string Name = Opts.getOptionName(Id);
+
+ // Add metavar, if used.
+ switch (Opts.getOptionKind(Id)) {
+ case Option::GroupClass: case Option::InputClass: case Option::UnknownClass:
+ assert(0 && "Invalid option with help text.");
+
+ case Option::MultiArgClass: case Option::JoinedAndSeparateClass:
+ assert(0 && "Cannot print metavar for this kind of option.");
+
+ case Option::FlagClass:
+ break;
+
+ case Option::SeparateClass: case Option::JoinedOrSeparateClass:
+ Name += ' ';
+ // FALLTHROUGH
+ case Option::JoinedClass: case Option::CommaJoinedClass:
+ Name += Opts.getOptionMetaVar(Id);
+ break;
+ }
+
+ return Name;
+}
+
+void Driver::PrintHelp(bool ShowHidden) const {
+ llvm::raw_ostream &OS = llvm::outs();
+
+ OS << "OVERVIEW: clang \"gcc-compatible\" driver\n";
+ OS << '\n';
+ OS << "USAGE: " << Name << " [options] <input files>\n";
+ OS << '\n';
+ OS << "OPTIONS:\n";
+
+ // Render help text into (option, help) pairs.
+ std::vector< std::pair<std::string, const char*> > OptionHelp;
+
+ for (unsigned i = options::OPT_INPUT, e = options::LastOption; i != e; ++i) {
+ options::ID Id = (options::ID) i;
+ if (const char *Text = getOpts().getOptionHelpText(Id))
+ OptionHelp.push_back(std::make_pair(getOptionHelpName(getOpts(), Id),
+ Text));
+ }
+
+ if (ShowHidden) {
+ OptionHelp.push_back(std::make_pair("\nDRIVER OPTIONS:",""));
+ OptionHelp.push_back(std::make_pair("-ccc-cxx",
+ "Act as a C++ driver"));
+ OptionHelp.push_back(std::make_pair("-ccc-gcc-name",
+ "Name for native GCC compiler"));
+ OptionHelp.push_back(std::make_pair("-ccc-clang-cxx",
+ "Use the clang compiler for C++"));
+ OptionHelp.push_back(std::make_pair("-ccc-no-clang",
+ "Never use the clang compiler"));
+ OptionHelp.push_back(std::make_pair("-ccc-no-clang-cpp",
+ "Never use the clang preprocessor"));
+ OptionHelp.push_back(std::make_pair("-ccc-clang-archs",
+                                       "Comma separated list of architectures "
+ "to use the clang compiler for"));
+ OptionHelp.push_back(std::make_pair("-ccc-pch-is-pch",
+ "Use lazy PCH for precompiled headers"));
+ OptionHelp.push_back(std::make_pair("-ccc-pch-is-pth",
+ "Use pretokenized headers for precompiled headers"));
+
+ OptionHelp.push_back(std::make_pair("\nDEBUG/DEVELOPMENT OPTIONS:",""));
+ OptionHelp.push_back(std::make_pair("-ccc-host-triple",
+ "Simulate running on the given target"));
+ OptionHelp.push_back(std::make_pair("-ccc-print-options",
+ "Dump parsed command line arguments"));
+ OptionHelp.push_back(std::make_pair("-ccc-print-phases",
+ "Dump list of actions to perform"));
+ OptionHelp.push_back(std::make_pair("-ccc-print-bindings",
+ "Show bindings of tools to actions"));
+ OptionHelp.push_back(std::make_pair("CCC_ADD_ARGS",
+ "(ENVIRONMENT VARIABLE) Comma separated list of "
+ "arguments to prepend to the command line"));
+ }
+
+ // Find the maximum option length.
+ unsigned OptionFieldWidth = 0;
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ // Skip titles.
+ if (!OptionHelp[i].second)
+ continue;
+
+ // Limit the amount of padding we are willing to give up for
+ // alignment.
+ unsigned Length = OptionHelp[i].first.size();
+ if (Length <= 23)
+ OptionFieldWidth = std::max(OptionFieldWidth, Length);
+ }
+
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ const std::string &Option = OptionHelp[i].first;
+ OS << " " << Option;
+ for (int j = Option.length(), e = OptionFieldWidth; j < e; ++j)
+ OS << ' ';
+ OS << ' ' << OptionHelp[i].second << '\n';
+ }
+
+ OS.flush();
+}
+
+void Driver::PrintVersion(const Compilation &C) const {
+ static char buf[] = "$URL: https://ed@llvm.org/svn/llvm-project/cfe/trunk/lib/Driver/Driver.cpp $";
+ char *zap = strstr(buf, "/lib/Driver");
+ if (zap)
+ *zap = 0;
+ zap = strstr(buf, "/clang/tools/clang");
+ if (zap)
+ *zap = 0;
+ const char *vers = buf+6;
+ // FIXME: Add cmake support and remove #ifdef
+#ifdef SVN_REVISION
+ const char *revision = SVN_REVISION;
+#else
+ const char *revision = "";
+#endif
+ // FIXME: The following handlers should use a callback mechanism, we
+ // don't know what the client would like to do.
+
+ llvm::errs() << "clang version " CLANG_VERSION_STRING " ("
+ << vers << " " << revision << ")" << "\n";
+
+ const ToolChain &TC = C.getDefaultToolChain();
+ llvm::errs() << "Target: " << TC.getTripleString() << '\n';
+}
+
+bool Driver::HandleImmediateArgs(const Compilation &C) {
+  // The order these options are handled in gcc is all over the
+ // place, but we don't expect inconsistencies w.r.t. that to matter
+ // in practice.
+
+ if (C.getArgs().hasArg(options::OPT_dumpversion)) {
+ llvm::outs() << CLANG_VERSION_STRING "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__help) ||
+ C.getArgs().hasArg(options::OPT__help_hidden)) {
+ PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__version)) {
+ PrintVersion(C);
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_v) ||
+ C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ PrintVersion(C);
+ SuppressMissingInputWarning = true;
+ }
+
+ const ToolChain &TC = C.getDefaultToolChain();
+ if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
+ llvm::outs() << "programs: =";
+ for (ToolChain::path_list::const_iterator it = TC.getProgramPaths().begin(),
+ ie = TC.getProgramPaths().end(); it != ie; ++it) {
+ if (it != TC.getProgramPaths().begin())
+ llvm::outs() << ':';
+ llvm::outs() << *it;
+ }
+ llvm::outs() << "\n";
+ llvm::outs() << "libraries: =";
+ for (ToolChain::path_list::const_iterator it = TC.getFilePaths().begin(),
+ ie = TC.getFilePaths().end(); it != ie; ++it) {
+ if (it != TC.getFilePaths().begin())
+ llvm::outs() << ':';
+ llvm::outs() << *it;
+ }
+ llvm::outs() << "\n";
+ return false;
+ }
+
+ // FIXME: The following handlers should use a callback mechanism, we
+ // don't know what the client would like to do.
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
+ llvm::outs() << GetFilePath(A->getValue(C.getArgs()), TC).toString()
+ << "\n";
+ return false;
+ }
+
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
+ llvm::outs() << GetProgramPath(A->getValue(C.getArgs()), TC).toString()
+ << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
+ llvm::outs() << GetFilePath("libgcc.a", TC).toString() << "\n";
+ return false;
+ }
+
+ return true;
+}
+
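+/// PrintActions1 - Print a single action (and, recursively, its inputs),
+/// assigning each action a small integer id the first time it is seen and
+/// returning that id.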
+static unsigned PrintActions1(const Compilation &C,
+ Action *A,
+ std::map<Action*, unsigned> &Ids) {
+ if (Ids.count(A))
+ return Ids[A];
+
+ std::string str;
+ llvm::raw_string_ostream os(str);
+
+ os << Action::getClassName(A->getKind()) << ", ";
+ if (InputAction *IA = dyn_cast<InputAction>(A)) {
+ os << "\"" << IA->getInputArg().getValue(C.getArgs()) << "\"";
+ } else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
+ os << '"' << (BIA->getArchName() ? BIA->getArchName() :
+ C.getDefaultToolChain().getArchName()) << '"'
+ << ", {" << PrintActions1(C, *BIA->begin(), Ids) << "}";
+ } else {
+ os << "{";
+ for (Action::iterator it = A->begin(), ie = A->end(); it != ie;) {
+ os << PrintActions1(C, *it, Ids);
+ ++it;
+ if (it != ie)
+ os << ", ";
+ }
+ os << "}";
+ }
+
+ unsigned Id = Ids.size();
+ Ids[A] = Id;
+ llvm::errs() << Id << ": " << os.str() << ", "
+ << types::getTypeName(A->getType()) << "\n";
+
+ return Id;
+}
+
+void Driver::PrintActions(const Compilation &C) const {
+ std::map<Action*, unsigned> Ids;
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it)
+ PrintActions1(C, *it, Ids);
+}
+
+void Driver::BuildUniversalActions(const ArgList &Args,
+ ActionList &Actions) const {
+ llvm::PrettyStackTraceString CrashInfo("Building actions for universal build");
+ // Collect the list of architectures. Duplicates are allowed, but
+ // should only be handled once (in the order seen).
+ llvm::StringSet<> ArchNames;
+ llvm::SmallVector<const char *, 4> Archs;
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ if (A->getOption().getId() == options::OPT_arch) {
+ const char *Name = A->getValue(Args);
+
+ // FIXME: We need to handle canonicalization of the specified
+ // arch?
+
+ A->claim();
+ if (ArchNames.insert(Name))
+ Archs.push_back(Name);
+ }
+ }
+
+ // When there is no explicit arch for this platform, make sure we
+ // still bind the architecture (to the default) so that -Xarch_ is
+ // handled correctly.
+ if (!Archs.size())
+ Archs.push_back(0);
+
+ // FIXME: We killed off some others but these aren't yet detected in
+ // a functional manner. If we added information to jobs about which
+ // "auxiliary" files they wrote then we could detect the conflict
+ // these cause downstream.
+ if (Archs.size() > 1) {
+ // No recovery needed, the point of this is just to prevent
+ // overwriting the same files.
+ if (const Arg *A = Args.getLastArg(options::OPT_save_temps))
+ Diag(clang::diag::err_drv_invalid_opt_with_multiple_archs)
+ << A->getAsString(Args);
+ }
+
+ ActionList SingleActions;
+ BuildActions(Args, SingleActions);
+
+ // Add in arch binding and lipo (if necessary) for every top level
+ // action.
+ for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) {
+ Action *Act = SingleActions[i];
+
+ // Make sure we can lipo this kind of output. If not (and it is an
+ // actual output) then we disallow, since we can't create an
+ // output file with the right name without overwriting it. We
+ // could remove this oddity by just changing the output names to
+ // include the arch, which would also fix
+ // -save-temps. Compatibility wins for now.
+
+ if (Archs.size() > 1 && !types::canLipoType(Act->getType()))
+ Diag(clang::diag::err_drv_invalid_output_with_multiple_archs)
+ << types::getTypeName(Act->getType());
+
+ ActionList Inputs;
+ for (unsigned i = 0, e = Archs.size(); i != e; ++i)
+ Inputs.push_back(new BindArchAction(Act, Archs[i]));
+
+    // Lipo if necessary; we do it this way because we need to set the
+ // arch flag so that -Xarch_ gets overwritten.
+ if (Inputs.size() == 1 || Act->getType() == types::TY_Nothing)
+ Actions.append(Inputs.begin(), Inputs.end());
+ else
+ Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
+ }
+}
+
+void Driver::BuildActions(const ArgList &Args, ActionList &Actions) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
+ // Start by constructing the list of inputs and their types.
+
+ // Track the current user specified (-x) input. We also explicitly
+ // track the argument used to set the type; we only want to claim
+ // the type when we actually use it, so we warn about unused -x
+ // arguments.
+ types::ID InputType = types::TY_Nothing;
+ Arg *InputTypeArg = 0;
+
+ llvm::SmallVector<std::pair<types::ID, const Arg*>, 16> Inputs;
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ if (isa<InputOption>(A->getOption())) {
+ const char *Value = A->getValue(Args);
+ types::ID Ty = types::TY_INVALID;
+
+ // Infer the input type if necessary.
+ if (InputType == types::TY_Nothing) {
+ // If there was an explicit arg for this, claim it.
+ if (InputTypeArg)
+ InputTypeArg->claim();
+
+ // stdin must be handled specially.
+ if (memcmp(Value, "-", 2) == 0) {
+ // If running with -E, treat as a C input (this changes the
+ // builtin macros, for example). This may be overridden by
+ // -ObjC below.
+ //
+ // Otherwise emit an error but still use a valid type to
+ // avoid spurious errors (e.g., no inputs).
+ if (!Args.hasArg(options::OPT_E, false))
+ Diag(clang::diag::err_drv_unknown_stdin_type);
+ Ty = types::TY_C;
+ } else {
+          // Otherwise look up by extension, and fall back to the object
+          // type if not found. We use a host hook here because Darwin at
+          // least has its own idea of what .s is.
+ if (const char *Ext = strrchr(Value, '.'))
+ Ty = Host->lookupTypeForExtension(Ext + 1);
+
+ if (Ty == types::TY_INVALID)
+ Ty = types::TY_Object;
+ }
+
+ // -ObjC and -ObjC++ override the default language, but only for "source
+ // files". We just treat everything that isn't a linker input as a
+ // source file.
+ //
+ // FIXME: Clean this up if we move the phase sequence into the type.
+ if (Ty != types::TY_Object) {
+ if (Args.hasArg(options::OPT_ObjC))
+ Ty = types::TY_ObjC;
+ else if (Args.hasArg(options::OPT_ObjCXX))
+ Ty = types::TY_ObjCXX;
+ }
+ } else {
+ assert(InputTypeArg && "InputType set w/o InputTypeArg");
+ InputTypeArg->claim();
+ Ty = InputType;
+ }
+
+ // Check that the file exists. It isn't clear this is worth
+ // doing, since the tool presumably does this anyway, and this
+ // just adds an extra stat to the equation, but this is gcc
+ // compatible.
+ if (memcmp(Value, "-", 2) != 0 && !llvm::sys::Path(Value).exists())
+ Diag(clang::diag::err_drv_no_such_file) << A->getValue(Args);
+ else
+ Inputs.push_back(std::make_pair(Ty, A));
+
+ } else if (A->getOption().isLinkerInput()) {
+ // Just treat as object type, we could make a special type for
+ // this if necessary.
+ Inputs.push_back(std::make_pair(types::TY_Object, A));
+
+ } else if (A->getOption().getId() == options::OPT_x) {
+ InputTypeArg = A;
+ InputType = types::lookupTypeForTypeSpecifier(A->getValue(Args));
+
+ // Follow gcc behavior and treat as linker input for invalid -x
+ // options. Its not clear why we shouldn't just revert to
+ // unknown; but this isn't very important, we might as well be
+ // bug comatible.
+ if (!InputType) {
+ Diag(clang::diag::err_drv_unknown_language) << A->getValue(Args);
+ InputType = types::TY_Object;
+ }
+ }
+ }
+
+ if (!SuppressMissingInputWarning && Inputs.empty()) {
+ Diag(clang::diag::err_drv_no_input_files);
+ return;
+ }
+
+ // Determine which compilation mode we are in. We look for options
+ // which affect the phase, starting with the earliest phases, and
+ // record which option we used to determine the final phase.
+ Arg *FinalPhaseArg = 0;
+ phases::ID FinalPhase;
+
+ // -{E,M,MM} only run the preprocessor.
+ if ((FinalPhaseArg = Args.getLastArg(options::OPT_E)) ||
+ (FinalPhaseArg = Args.getLastArg(options::OPT_M)) ||
+ (FinalPhaseArg = Args.getLastArg(options::OPT_MM))) {
+ FinalPhase = phases::Preprocess;
+
+ // -{fsyntax-only,-analyze,emit-llvm,S} only run up to the compiler.
+ } else if ((FinalPhaseArg = Args.getLastArg(options::OPT_fsyntax_only)) ||
+ (FinalPhaseArg = Args.getLastArg(options::OPT__analyze,
+ options::OPT__analyze_auto)) ||
+ (FinalPhaseArg = Args.getLastArg(options::OPT_S))) {
+ FinalPhase = phases::Compile;
+
+ // -c only runs up to the assembler.
+ } else if ((FinalPhaseArg = Args.getLastArg(options::OPT_c))) {
+ FinalPhase = phases::Assemble;
+
+ // Otherwise do everything.
+ } else
+ FinalPhase = phases::Link;
+
+ // Reject -Z* at the top level, these options should never have been
+ // exposed by gcc.
+ if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
+ Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
+
+ // Construct the actions to perform.
+ ActionList LinkerInputs;
+ for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
+ types::ID InputType = Inputs[i].first;
+ const Arg *InputArg = Inputs[i].second;
+
+ unsigned NumSteps = types::getNumCompilationPhases(InputType);
+ assert(NumSteps && "Invalid number of steps!");
+
+ // If the first step comes after the final phase we are doing as
+ // part of this compilation, warn the user about it.
+ phases::ID InitialPhase = types::getCompilationPhase(InputType, 0);
+ if (InitialPhase > FinalPhase) {
+ // Claim here to avoid the more general unused warning.
+ InputArg->claim();
+ Diag(clang::diag::warn_drv_input_file_unused)
+ << InputArg->getAsString(Args)
+ << getPhaseName(InitialPhase)
+ << FinalPhaseArg->getOption().getName();
+ continue;
+ }
+
+ // Build the pipeline for this file.
+ Action *Current = new InputAction(*InputArg, InputType);
+ for (unsigned i = 0; i != NumSteps; ++i) {
+ phases::ID Phase = types::getCompilationPhase(InputType, i);
+
+ // We are done if this step is past what the user requested.
+ if (Phase > FinalPhase)
+ break;
+
+ // Queue linker inputs.
+ if (Phase == phases::Link) {
+ assert(i + 1 == NumSteps && "linking must be final compilation step.");
+ LinkerInputs.push_back(Current);
+ Current = 0;
+ break;
+ }
+
+ // Some types skip the assembler phase (e.g., llvm-bc), but we
+ // can't encode this in the steps because the intermediate type
+ // depends on arguments. Just special case here.
+ if (Phase == phases::Assemble && Current->getType() != types::TY_PP_Asm)
+ continue;
+
+ // Otherwise construct the appropriate action.
+ Current = ConstructPhaseAction(Args, Phase, Current);
+ if (Current->getType() == types::TY_Nothing)
+ break;
+ }
+
+ // If we ended with something, add to the output list.
+ if (Current)
+ Actions.push_back(Current);
+ }
+
+ // Add a link action if necessary.
+ if (!LinkerInputs.empty())
+ Actions.push_back(new LinkJobAction(LinkerInputs, types::TY_Image));
+}
+
+Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
+ Action *Input) const {
+ llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
+ // Build the appropriate action.
+ switch (Phase) {
+ case phases::Link: assert(0 && "link action invalid here.");
+ case phases::Preprocess: {
+ types::ID OutputTy;
+ // -{M, MM} alter the output type.
+ if (Args.hasArg(options::OPT_M) || Args.hasArg(options::OPT_MM)) {
+ OutputTy = types::TY_Dependencies;
+ } else {
+ OutputTy = types::getPreprocessedType(Input->getType());
+ assert(OutputTy != types::TY_INVALID &&
+ "Cannot preprocess this input type!");
+ }
+ return new PreprocessJobAction(Input, OutputTy);
+ }
+ case phases::Precompile:
+ return new PrecompileJobAction(Input, types::TY_PCH);
+ case phases::Compile: {
+ if (Args.hasArg(options::OPT_fsyntax_only)) {
+ return new CompileJobAction(Input, types::TY_Nothing);
+ } else if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto)) {
+ return new AnalyzeJobAction(Input, types::TY_Plist);
+ } else if (Args.hasArg(options::OPT_emit_llvm) ||
+ Args.hasArg(options::OPT_flto) ||
+ Args.hasArg(options::OPT_O4)) {
+ types::ID Output =
+ Args.hasArg(options::OPT_S) ? types::TY_LLVMAsm : types::TY_LLVMBC;
+ return new CompileJobAction(Input, Output);
+ } else {
+ return new CompileJobAction(Input, types::TY_PP_Asm);
+ }
+ }
+ case phases::Assemble:
+ return new AssembleJobAction(Input, types::TY_Object);
+ }
+
+ assert(0 && "invalid phase in ConstructPhaseAction");
+ return 0;
+}
+
+void Driver::BuildJobs(Compilation &C) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
+ bool SaveTemps = C.getArgs().hasArg(options::OPT_save_temps);
+ bool UsePipes = C.getArgs().hasArg(options::OPT_pipe);
+
+ // FIXME: Pipes are forcibly disabled until we support executing
+ // them.
+ if (!CCCPrintBindings)
+ UsePipes = false;
+
+ // -save-temps inhibits pipes.
+ if (SaveTemps && UsePipes) {
+ Diag(clang::diag::warn_drv_pipe_ignored_with_save_temps);
+    UsePipes = false;
+ }
+
+ Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
+
+ // It is an error to provide a -o option if we are making multiple
+ // output files.
+ if (FinalOutput) {
+ unsigned NumOutputs = 0;
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it)
+ if ((*it)->getType() != types::TY_Nothing)
+ ++NumOutputs;
+
+ if (NumOutputs > 1) {
+ Diag(clang::diag::err_drv_output_argument_with_multiple_files);
+ FinalOutput = 0;
+ }
+ }
+
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it) {
+ Action *A = *it;
+
+ // If we are linking an image for multiple archs then the linker
+ // wants -arch_multiple and -final_output <final image
+ // name>. Unfortunately, this doesn't fit in cleanly because we
+ // have to pass this information down.
+ //
+ // FIXME: This is a hack; find a cleaner way to integrate this
+ // into the process.
+ const char *LinkingOutput = 0;
+ if (isa<LipoJobAction>(A)) {
+ if (FinalOutput)
+ LinkingOutput = FinalOutput->getValue(C.getArgs());
+ else
+ LinkingOutput = DefaultImageName.c_str();
+ }
+
+ InputInfo II;
+ BuildJobsForAction(C, A, &C.getDefaultToolChain(),
+ /*CanAcceptPipe*/ true,
+ /*AtTopLevel*/ true,
+ /*LinkingOutput*/ LinkingOutput,
+ II);
+ }
+
+ // If the user passed -Qunused-arguments or there were errors, don't
+ // warn about any unused arguments.
+ if (Diags.getNumErrors() ||
+ C.getArgs().hasArg(options::OPT_Qunused_arguments))
+ return;
+
+ // Claim -### here.
+ (void) C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
+
+ for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ // FIXME: It would be nice to be able to send the argument to the
+ // Diagnostic, so that extra values, position, and so on could be
+ // printed.
+ if (!A->isClaimed()) {
+ if (A->getOption().hasNoArgumentUnused())
+ continue;
+
+ // Suppress the warning automatically if this is just a flag,
+ // and it is an instance of an argument we already claimed.
+ const Option &Opt = A->getOption();
+ if (isa<FlagOption>(Opt)) {
+ bool DuplicateClaimed = false;
+
+ // FIXME: Use iterator.
+ for (ArgList::const_iterator it = C.getArgs().begin(),
+ ie = C.getArgs().end(); it != ie; ++it) {
+ if ((*it)->isClaimed() && (*it)->getOption().matches(Opt.getId())) {
+ DuplicateClaimed = true;
+ break;
+ }
+ }
+
+ if (DuplicateClaimed)
+ continue;
+ }
+
+ Diag(clang::diag::warn_drv_unused_argument)
+ << A->getAsString(C.getArgs());
+ }
+ }
+}
+
+void Driver::BuildJobsForAction(Compilation &C,
+ const Action *A,
+ const ToolChain *TC,
+ bool CanAcceptPipe,
+ bool AtTopLevel,
+ const char *LinkingOutput,
+ InputInfo &Result) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs for action");
+
+ bool UsePipes = C.getArgs().hasArg(options::OPT_pipe);
+ // FIXME: Pipes are forcibly disabled until we support executing
+ // them.
+ if (!CCCPrintBindings)
+ UsePipes = false;
+
+ if (const InputAction *IA = dyn_cast<InputAction>(A)) {
+ // FIXME: It would be nice to not claim this here; maybe the old
+ // scheme of just using Args was better?
+ const Arg &Input = IA->getInputArg();
+ Input.claim();
+ if (isa<PositionalArg>(Input)) {
+ const char *Name = Input.getValue(C.getArgs());
+ Result = InputInfo(Name, A->getType(), Name);
+ } else
+ Result = InputInfo(&Input, A->getType(), "");
+ return;
+ }
+
+ if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
+ const char *ArchName = BAA->getArchName();
+ std::string Arch;
+ if (!ArchName) {
+ Arch = C.getDefaultToolChain().getArchName();
+ ArchName = Arch.c_str();
+ }
+ BuildJobsForAction(C,
+ *BAA->begin(),
+ Host->getToolChain(C.getArgs(), ArchName),
+ CanAcceptPipe,
+ AtTopLevel,
+ LinkingOutput,
+ Result);
+ return;
+ }
+
+ const JobAction *JA = cast<JobAction>(A);
+ const Tool &T = TC->SelectTool(C, *JA);
+
+ // See if we should use an integrated preprocessor. We do so when we
+ // have exactly one input, since this is the only use case we care
+ // about (irrelevant since we don't support combine yet).
+ bool UseIntegratedCPP = false;
+ const ActionList *Inputs = &A->getInputs();
+ if (Inputs->size() == 1 && isa<PreprocessJobAction>(*Inputs->begin())) {
+ if (!C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
+ !C.getArgs().hasArg(options::OPT_traditional_cpp) &&
+ !C.getArgs().hasArg(options::OPT_save_temps) &&
+ T.hasIntegratedCPP()) {
+ UseIntegratedCPP = true;
+ Inputs = &(*Inputs)[0]->getInputs();
+ }
+ }
+
+ // Only use pipes when there is exactly one input.
+ bool TryToUsePipeInput = Inputs->size() == 1 && T.acceptsPipedInput();
+ InputInfoList InputInfos;
+ for (ActionList::const_iterator it = Inputs->begin(), ie = Inputs->end();
+ it != ie; ++it) {
+ InputInfo II;
+ BuildJobsForAction(C, *it, TC, TryToUsePipeInput,
+ /*AtTopLevel*/false,
+ LinkingOutput,
+ II);
+ InputInfos.push_back(II);
+ }
+
+ // Determine if we should output to a pipe.
+ bool OutputToPipe = false;
+ if (CanAcceptPipe && T.canPipeOutput()) {
+ // Some actions default to writing to a pipe if they are the top
+ // level phase and there was no user override.
+ //
+ // FIXME: Is there a better way to handle this?
+ if (AtTopLevel) {
+ if (isa<PreprocessJobAction>(A) && !C.getArgs().hasArg(options::OPT_o))
+ OutputToPipe = true;
+ } else if (UsePipes)
+ OutputToPipe = true;
+ }
+
+ // Figure out where to put the job (pipes).
+ Job *Dest = &C.getJobs();
+ if (InputInfos[0].isPipe()) {
+ assert(TryToUsePipeInput && "Unrequested pipe!");
+ assert(InputInfos.size() == 1 && "Unexpected pipe with multiple inputs.");
+ Dest = &InputInfos[0].getPipe();
+ }
+
+ // Always use the first input as the base input.
+ const char *BaseInput = InputInfos[0].getBaseInput();
+
+ // Determine the place to write output to (nothing, pipe, or
+ // filename) and where to put the new job.
+ if (JA->getType() == types::TY_Nothing) {
+ Result = InputInfo(A->getType(), BaseInput);
+ } else if (OutputToPipe) {
+ // Append to current piped job or create a new one as appropriate.
+ PipedJob *PJ = dyn_cast<PipedJob>(Dest);
+ if (!PJ) {
+ PJ = new PipedJob();
+      // FIXME: Temporary hack so that -ccc-print-bindings works until
+ // we have pipe support. Please remove later.
+ if (!CCCPrintBindings)
+ cast<JobList>(Dest)->addJob(PJ);
+ Dest = PJ;
+ }
+ Result = InputInfo(PJ, A->getType(), BaseInput);
+ } else {
+ Result = InputInfo(GetNamedOutputPath(C, *JA, BaseInput, AtTopLevel),
+ A->getType(), BaseInput);
+ }
+
+ if (CCCPrintBindings) {
+ llvm::errs() << "# \"" << T.getToolChain().getTripleString() << '"'
+ << " - \"" << T.getName() << "\", inputs: [";
+ for (unsigned i = 0, e = InputInfos.size(); i != e; ++i) {
+ llvm::errs() << InputInfos[i].getAsString();
+ if (i + 1 != e)
+ llvm::errs() << ", ";
+ }
+ llvm::errs() << "], output: " << Result.getAsString() << "\n";
+ } else {
+ T.ConstructJob(C, *JA, *Dest, Result, InputInfos,
+ C.getArgsForToolChain(TC), LinkingOutput);
+ }
+}
+
+const char *Driver::GetNamedOutputPath(Compilation &C,
+ const JobAction &JA,
+ const char *BaseInput,
+ bool AtTopLevel) const {
+ llvm::PrettyStackTraceString CrashInfo("Computing output path");
+ // Output to a user requested destination?
+ if (AtTopLevel) {
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
+ return C.addResultFile(FinalOutput->getValue(C.getArgs()));
+ }
+
+ // Output to a temporary file?
+ if (!AtTopLevel && !C.getArgs().hasArg(options::OPT_save_temps)) {
+ std::string TmpName =
+ GetTemporaryPath(types::getTypeTempSuffix(JA.getType()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+
+ llvm::sys::Path BasePath(BaseInput);
+ std::string BaseName(BasePath.getLast());
+
+ // Determine what the derived output name should be.
+ const char *NamedOutput;
+ if (JA.getType() == types::TY_Image) {
+ NamedOutput = DefaultImageName.c_str();
+ } else {
+ const char *Suffix = types::getTypeTempSuffix(JA.getType());
+ assert(Suffix && "All types used for output should have a suffix.");
+
+ std::string::size_type End = std::string::npos;
+ if (!types::appendSuffixForType(JA.getType()))
+ End = BaseName.rfind('.');
+ std::string Suffixed(BaseName.substr(0, End));
+ Suffixed += '.';
+ Suffixed += Suffix;
+ NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
+ }
+
+ // As an annoying special case, PCH generation doesn't strip the
+ // pathname.
+ if (JA.getType() == types::TY_PCH) {
+ BasePath.eraseComponent();
+ if (BasePath.isEmpty())
+ BasePath = NamedOutput;
+ else
+ BasePath.appendComponent(NamedOutput);
+ return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()));
+ } else {
+ return C.addResultFile(NamedOutput);
+ }
+}
+
+llvm::sys::Path Driver::GetFilePath(const char *Name,
+ const ToolChain &TC) const {
+ const ToolChain::path_list &List = TC.getFilePaths();
+ for (ToolChain::path_list::const_iterator
+ it = List.begin(), ie = List.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
+ P.appendComponent(Name);
+ if (P.exists())
+ return P;
+ }
+
+ return llvm::sys::Path(Name);
+}
+
+llvm::sys::Path Driver::GetProgramPath(const char *Name,
+ const ToolChain &TC,
+ bool WantFile) const {
+ const ToolChain::path_list &List = TC.getProgramPaths();
+ for (ToolChain::path_list::const_iterator
+ it = List.begin(), ie = List.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
+ P.appendComponent(Name);
+ if (WantFile ? P.exists() : P.canExecute())
+ return P;
+ }
+
+ // If all else failed, search the path.
+ llvm::sys::Path P(llvm::sys::Program::FindProgramByName(Name));
+ if (!P.empty())
+ return P;
+
+ return llvm::sys::Path(Name);
+}
+
+std::string Driver::GetTemporaryPath(const char *Suffix) const {
+ // FIXME: This is lame; sys::Path should provide this function (in
+ // particular, it should know how to find the temporary files dir).
+ std::string Error;
+ const char *TmpDir = ::getenv("TMPDIR");
+ if (!TmpDir)
+ TmpDir = ::getenv("TEMP");
+ if (!TmpDir)
+ TmpDir = ::getenv("TMP");
+ if (!TmpDir)
+ TmpDir = "/tmp";
+ llvm::sys::Path P(TmpDir);
+ P.appendComponent("cc");
+ if (P.makeUnique(false, &Error)) {
+ Diag(clang::diag::err_drv_unable_to_make_temp) << Error;
+ return "";
+ }
+
+ // FIXME: Grumble, makeUnique sometimes leaves the file around!?
+ // PR3837.
+ P.eraseFromDisk(false, 0);
+
+ P.appendSuffix(Suffix);
+ return P.toString();
+}
+
+const HostInfo *Driver::GetHostInfo(const char *TripleStr) const {
+ llvm::PrettyStackTraceString CrashInfo("Constructing host");
+ llvm::Triple Triple(TripleStr);
+
+ // Normalize Arch a bit.
+ //
+ // FIXME: We shouldn't need to do this once everything goes through the triple
+ // interface.
+ if (Triple.getArchName() == "i686")
+ Triple.setArchName("i386");
+ else if (Triple.getArchName() == "amd64")
+ Triple.setArchName("x86_64");
+ else if (Triple.getArchName() == "ppc" ||
+ Triple.getArchName() == "Power Macintosh")
+ Triple.setArchName("powerpc");
+ else if (Triple.getArchName() == "ppc64")
+ Triple.setArchName("powerpc64");
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ return createDarwinHostInfo(*this, Triple);
+ case llvm::Triple::DragonFly:
+ return createDragonFlyHostInfo(*this, Triple);
+ case llvm::Triple::FreeBSD:
+ return createFreeBSDHostInfo(*this, Triple);
+ case llvm::Triple::Linux:
+ return createLinuxHostInfo(*this, Triple);
+ default:
+ return createUnknownHostInfo(*this, Triple);
+ }
+}
+
+bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
+ const std::string &ArchNameStr) const {
+ // FIXME: Remove this hack.
+ const char *ArchName = ArchNameStr.c_str();
+ if (ArchNameStr == "powerpc")
+ ArchName = "ppc";
+ else if (ArchNameStr == "powerpc64")
+ ArchName = "ppc64";
+
+ // Check if user requested no clang, or clang doesn't understand
+ // this type (we only handle single inputs for now).
+ if (!CCCUseClang || JA.size() != 1 ||
+ !types::isAcceptedByClang((*JA.begin())->getType()))
+ return false;
+
+ // Otherwise make sure this is an action clang understands.
+ if (isa<PreprocessJobAction>(JA)) {
+ if (!CCCUseClangCPP) {
+ Diag(clang::diag::warn_drv_not_using_clang_cpp);
+ return false;
+ }
+ } else if (!isa<PrecompileJobAction>(JA) && !isa<CompileJobAction>(JA))
+ return false;
+
+ // Use clang for C++?
+ if (!CCCUseClangCXX && types::isCXX((*JA.begin())->getType())) {
+ Diag(clang::diag::warn_drv_not_using_clang_cxx);
+ return false;
+ }
+
+ // Always use clang for precompiling, regardless of archs. PTH is
+ // platform independent, and this allows the use of the static
+ // analyzer on platforms we don't have full IRgen support for.
+ if (isa<PrecompileJobAction>(JA))
+ return true;
+
+ // Finally, don't use clang if this isn't one of the user specified
+ // archs to build.
+ if (!CCCClangArchs.empty() && !CCCClangArchs.count(ArchName)) {
+ Diag(clang::diag::warn_drv_not_using_clang_arch) << ArchName;
+ return false;
+ }
+
+ return true;
+}
+
+/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and
+/// return the grouped values as integers. Numbers which are not
+/// provided are set to 0.
+///
+/// \return True if the entire string was parsed (9.2), or all groups
+/// were parsed (10.3.5extrastuff).
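+///
+/// For example, "10.5.8" yields Major=10, Minor=5, Micro=8, HadExtra=false,
+/// while "10.3.5extrastuff" yields Major=10, Minor=3, Micro=5, HadExtra=true.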
+bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
+ unsigned &Minor, unsigned &Micro,
+ bool &HadExtra) {
+ HadExtra = false;
+
+ Major = Minor = Micro = 0;
+ if (*Str == '\0')
+ return true;
+
+ char *End;
+ Major = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End+1;
+ Minor = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End+1;
+ Micro = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (Str == End)
+ return false;
+ HadExtra = true;
+ return true;
+}
diff --git a/lib/Driver/HostInfo.cpp b/lib/Driver/HostInfo.cpp
new file mode 100644
index 0000000..603b3ab
--- /dev/null
+++ b/lib/Driver/HostInfo.cpp
@@ -0,0 +1,408 @@
+//===--- HostInfo.cpp - Host specific information -----------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/HostInfo.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+
+#include "ToolChains.h"
+
+#include <cassert>
+
+using namespace clang::driver;
+
+HostInfo::HostInfo(const Driver &D, const llvm::Triple &_Triple)
+ : TheDriver(D), Triple(_Triple)
+{
+
+}
+
+HostInfo::~HostInfo() {
+}
+
+namespace {
+
+// Darwin Host Info
+
+/// DarwinHostInfo - Darwin host information implementation.
+class DarwinHostInfo : public HostInfo {
+ /// Darwin version of host.
+ unsigned DarwinVersion[3];
+
+ /// GCC version to use on this host.
+ unsigned GCCVersion[3];
+
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain *> ToolChains;
+
+public:
+ DarwinHostInfo(const Driver &D, const llvm::Triple &Triple);
+ ~DarwinHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ types::ID Ty = types::lookupTypeForExtension(Ext);
+
+ // Darwin always preprocesses assembly files (unless -x is used
+ // explicitly).
+ if (Ty == types::TY_PP_Asm)
+ return types::TY_Asm;
+
+ return Ty;
+ }
+
+ virtual ToolChain *getToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+DarwinHostInfo::DarwinHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {
+
+ assert((getArchName() == "i386" || getArchName() == "x86_64" ||
+ getArchName() == "powerpc" || getArchName() == "powerpc64" ||
+ getArchName() == "arm") &&
+ "Unknown Darwin arch.");
+
+ assert(memcmp(&getOSName()[0], "darwin", 6) == 0 &&
+ "Unknown Darwin platform.");
+ bool HadExtra;
+ if (!Driver::GetReleaseVersion(&getOSName()[6],
+ DarwinVersion[0], DarwinVersion[1],
+ DarwinVersion[2], HadExtra)) {
+ D.Diag(clang::diag::err_drv_invalid_darwin_version)
+ << getOSName();
+ }
+
+  // We can only invoke gcc 4.2.1 for now.
+ GCCVersion[0] = 4;
+ GCCVersion[1] = 2;
+ GCCVersion[2] = 1;
+}
+
+DarwinHostInfo::~DarwinHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool DarwinHostInfo::useDriverDriver() const {
+ return true;
+}
+
+ToolChain *DarwinHostInfo::getToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ std::string Arch;
+ if (!ArchName) {
+ Arch = getArchName();
+ ArchName = Arch.c_str();
+
+ // If no arch name is specified, infer it from the host and
+ // -m32/-m64.
+ if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
+ if (getArchName() == "i386" || getArchName() == "x86_64") {
+ ArchName =
+ (A->getOption().getId() == options::OPT_m32) ? "i386" : "x86_64";
+ } else if (getArchName() == "powerpc" || getArchName() == "powerpc64") {
+ ArchName = (A->getOption().getId() == options::OPT_m32) ? "powerpc" :
+ "powerpc64";
+ }
+ }
+ } else {
+ // Normalize arch name; we shouldn't be doing this here.
+ //
+ // FIXME: This should be unnecessary once everything moves over to using the
+ // ID based Triple interface.
+ if (strcmp(ArchName, "ppc") == 0)
+ ArchName = "powerpc";
+ else if (strcmp(ArchName, "ppc64") == 0)
+ ArchName = "powerpc64";
+ }
+
+ ToolChain *&TC = ToolChains[ArchName];
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(ArchName);
+
+ if (strcmp(ArchName, "i386") == 0 || strcmp(ArchName, "x86_64") == 0)
+ TC = new toolchains::Darwin_X86(*this, TCTriple,
+ DarwinVersion,
+ GCCVersion);
+ else
+ TC = new toolchains::Darwin_GCC(*this, TCTriple);
+ }
+
+ return TC;
+}
+
+// Unknown Host Info
+
+/// UnknownHostInfo - Generic host information to use for unknown
+/// hosts.
+class UnknownHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ UnknownHostInfo(const Driver &D, const llvm::Triple& Triple);
+ ~UnknownHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+ }
+
+ virtual ToolChain *getToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+UnknownHostInfo::UnknownHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {
+}
+
+UnknownHostInfo::~UnknownHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool UnknownHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *UnknownHostInfo::getToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+ // Automatically handle some instances of -m32/-m64 we know about.
+ std::string Arch = getArchName();
+ ArchName = Arch.c_str();
+ if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
+ if (getArchName() == "i386" || getArchName() == "x86_64") {
+ ArchName =
+ (A->getOption().getId() == options::OPT_m32) ? "i386" : "x86_64";
+ } else if (getArchName() == "powerpc" || getArchName() == "powerpc64") {
+ ArchName =
+ (A->getOption().getId() == options::OPT_m32) ? "powerpc" : "powerpc64";
+ }
+ }
+
+ ToolChain *&TC = ToolChains[ArchName];
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(ArchName);
+
+ TC = new toolchains::Generic_GCC(*this, TCTriple);
+ }
+
+ return TC;
+}
+
+// FreeBSD Host Info
+
+/// FreeBSDHostInfo - FreeBSD host information implementation.
+class FreeBSDHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ FreeBSDHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {}
+ ~FreeBSDHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+ }
+
+ virtual ToolChain *getToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+FreeBSDHostInfo::~FreeBSDHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool FreeBSDHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *FreeBSDHostInfo::getToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ bool Lib32 = false;
+
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+  // On x86_64 we need to be able to compile 32-bit binaries as well.
+ // Compiling 64-bit binaries on i386 is not supported. We don't have a
+ // lib64.
+ std::string Arch = getArchName();
+ ArchName = Arch.c_str();
+ if (Args.hasArg(options::OPT_m32) && getArchName() == "x86_64") {
+ ArchName = "i386";
+ Lib32 = true;
+ }
+
+ ToolChain *&TC = ToolChains[ArchName];
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(ArchName);
+
+ TC = new toolchains::FreeBSD(*this, TCTriple, Lib32);
+ }
+
+ return TC;
+}
+
+// DragonFly Host Info
+
+/// DragonFlyHostInfo - DragonFly host information implementation.
+class DragonFlyHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ DragonFlyHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {}
+ ~DragonFlyHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+ }
+
+ virtual ToolChain *getToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+DragonFlyHostInfo::~DragonFlyHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool DragonFlyHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *DragonFlyHostInfo::getToolChain(const ArgList &Args,
+ const char *ArchName) const {
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+ ToolChain *&TC = ToolChains[getArchName()];
+
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(getArchName());
+
+ TC = new toolchains::DragonFly(*this, TCTriple);
+ }
+
+ return TC;
+}
+
+// Linux Host Info
+
+/// LinuxHostInfo - Linux host information implementation.
+class LinuxHostInfo : public HostInfo {
+ /// Cache of tool chains we have created.
+ mutable llvm::StringMap<ToolChain*> ToolChains;
+
+public:
+ LinuxHostInfo(const Driver &D, const llvm::Triple& Triple)
+ : HostInfo(D, Triple) {}
+ ~LinuxHostInfo();
+
+ virtual bool useDriverDriver() const;
+
+ virtual types::ID lookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+ }
+
+ virtual ToolChain *getToolChain(const ArgList &Args,
+ const char *ArchName) const;
+};
+
+LinuxHostInfo::~LinuxHostInfo() {
+ for (llvm::StringMap<ToolChain*>::iterator
+ it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool LinuxHostInfo::useDriverDriver() const {
+ return false;
+}
+
+ToolChain *LinuxHostInfo::getToolChain(const ArgList &Args,
+ const char *ArchName) const {
+
+ assert(!ArchName &&
+ "Unexpected arch name on platform without driver driver support.");
+
+ ArchName = getArchName().c_str();
+
+ ToolChain *&TC = ToolChains[ArchName];
+
+ if (!TC) {
+ llvm::Triple TCTriple(getTriple());
+ TCTriple.setArchName(getArchName());
+
+ TC = new toolchains::Linux(*this, TCTriple);
+ }
+
+ return TC;
+}
+
+}
+
+const HostInfo *
+clang::driver::createDarwinHostInfo(const Driver &D,
+ const llvm::Triple& Triple){
+ return new DarwinHostInfo(D, Triple);
+}
+
+const HostInfo *
+clang::driver::createFreeBSDHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new FreeBSDHostInfo(D, Triple);
+}
+
+const HostInfo *
+clang::driver::createDragonFlyHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new DragonFlyHostInfo(D, Triple);
+}
+
+const HostInfo *
+clang::driver::createLinuxHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new LinuxHostInfo(D, Triple);
+}
+
+const HostInfo *
+clang::driver::createUnknownHostInfo(const Driver &D,
+ const llvm::Triple& Triple) {
+ return new UnknownHostInfo(D, Triple);
+}
diff --git a/lib/Driver/InputInfo.h b/lib/Driver/InputInfo.h
new file mode 100644
index 0000000..c657bef
--- /dev/null
+++ b/lib/Driver/InputInfo.h
@@ -0,0 +1,101 @@
+//===--- InputInfo.h - Input Source & Type Information ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_INPUTINFO_H_
+#define CLANG_LIB_DRIVER_INPUTINFO_H_
+
+#include "clang/Driver/Types.h"
+
+#include <cassert>
+#include <string>
+
+namespace clang {
+namespace driver {
+ class PipedJob;
+
+/// InputInfo - Wrapper for information about an input source.
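+///
+/// For example, a C source input named "foo.c" might be represented as
+/// InputInfo("foo.c", types::TY_C, "foo.c"), where the final argument records
+/// the base input it was derived from.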
+class InputInfo {
+  // FIXME: The distinction between filenames and input args here is
+ // gross; we should probably drop the idea of a "linker
+ // input". Doing so means tweaking pipelining to still create link
+ // steps when it sees linker inputs (but not treat them as
+ // arguments), and making sure that arguments get rendered
+ // correctly.
+ enum Class {
+ Nothing,
+ Filename,
+ InputArg,
+ Pipe
+ };
+
+ union {
+ const char *Filename;
+ const Arg *InputArg;
+ PipedJob *Pipe;
+ } Data;
+ Class Kind;
+ types::ID Type;
+ const char *BaseInput;
+
+public:
+ InputInfo() {}
+ InputInfo(types::ID _Type, const char *_BaseInput)
+ : Kind(Nothing), Type(_Type), BaseInput(_BaseInput) {
+ }
+ InputInfo(const char *_Filename, types::ID _Type, const char *_BaseInput)
+ : Kind(Filename), Type(_Type), BaseInput(_BaseInput) {
+ Data.Filename = _Filename;
+ }
+ InputInfo(const Arg *_InputArg, types::ID _Type, const char *_BaseInput)
+ : Kind(InputArg), Type(_Type), BaseInput(_BaseInput) {
+ Data.InputArg = _InputArg;
+ }
+ InputInfo(PipedJob *_Pipe, types::ID _Type, const char *_BaseInput)
+ : Kind(Pipe), Type(_Type), BaseInput(_BaseInput) {
+ Data.Pipe = _Pipe;
+ }
+
+ bool isNothing() const { return Kind == Nothing; }
+ bool isFilename() const { return Kind == Filename; }
+ bool isInputArg() const { return Kind == InputArg; }
+ bool isPipe() const { return Kind == Pipe; }
+ types::ID getType() const { return Type; }
+ const char *getBaseInput() const { return BaseInput; }
+
+ const char *getFilename() const {
+ assert(isFilename() && "Invalid accessor.");
+ return Data.Filename;
+ }
+ const Arg &getInputArg() const {
+ assert(isInputArg() && "Invalid accessor.");
+ return *Data.InputArg;
+ }
+ PipedJob &getPipe() const {
+ assert(isPipe() && "Invalid accessor.");
+ return *Data.Pipe;
+ }
+
+ /// getAsString - Return a string name for this input, for
+ /// debugging.
+ std::string getAsString() const {
+ if (isPipe())
+ return "(pipe)";
+ else if (isFilename())
+ return std::string("\"") + getFilename() + '"';
+ else if (isInputArg())
+ return "(input arg)";
+ else
+ return "(nothing)";
+ }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
new file mode 100644
index 0000000..222cf15
--- /dev/null
+++ b/lib/Driver/Job.cpp
@@ -0,0 +1,31 @@
+//===--- Job.cpp - Command to Execute -----------------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Job.h"
+
+#include <cassert>
+using namespace clang::driver;
+
+Job::~Job() {}
+
+Command::Command(const char *_Executable, const ArgStringList &_Arguments)
+ : Job(CommandClass), Executable(_Executable), Arguments(_Arguments) {
+}
+
+PipedJob::PipedJob() : Job(PipedJobClass) {}
+
+JobList::JobList() : Job(JobListClass) {}
+
+void Job::addCommand(Command *C) {
+ if (PipedJob *PJ = dyn_cast<PipedJob>(this))
+ PJ->addCommand(C);
+ else
+ cast<JobList>(this)->addJob(C);
+}
+
diff --git a/lib/Driver/Makefile b/lib/Driver/Makefile
new file mode 100644
index 0000000..d163f0f
--- /dev/null
+++ b/lib/Driver/Makefile
@@ -0,0 +1,28 @@
+##===- clang/lib/Driver/Makefile ---------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangDriver
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+
+include $(LEVEL)/Makefile.common
+
+SVN_REVISION := $(shell cd $(PROJ_SRC_DIR)/../.. && svnversion)
+
+CPP.Defines += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include \
+ -DSVN_REVISION='"$(SVN_REVISION)"'
+
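+# Write the revision into .ver-svn only when it changes; Driver.o depends on
+# .ver-svn (below), so the -DSVN_REVISION define is refreshed after an update
+# without forcing a rebuild on every make.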
+$(ObjDir)/.ver-svn .ver: $(ObjDir)/.dir
+ @if [ '$(SVN_REVISION)' != '$(shell cat $(ObjDir)/.ver-svn 2>/dev/null)' ]; then\
+ echo '$(SVN_REVISION)' > $(ObjDir)/.ver-svn; \
+ fi
+$(ObjDir)/.ver-svn: .ver
+$(ObjDir)/Driver.o: $(ObjDir)/.ver-svn
diff --git a/lib/Driver/OptTable.cpp b/lib/Driver/OptTable.cpp
new file mode 100644
index 0000000..7ea6a8b
--- /dev/null
+++ b/lib/Driver/OptTable.cpp
@@ -0,0 +1,265 @@
+//===--- OptTable.cpp - Option info table -------------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Options.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Option.h"
+#include <algorithm>
+#include <cassert>
+
+using namespace clang::driver;
+using namespace clang::driver::options;
+
+struct Info {
+ const char *Name;
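+  // A string of single-character flags, decoded in constructOption() below:
+  // 'd' driver option, 'i' no-opt-as-input, 'l' linker input, 'q'
+  // no-argument-unused, 'u' unsupported, and 'J'/'S' to force joined or
+  // separate rendering.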
+ const char *Flags;
+ const char *HelpText;
+ const char *MetaVar;
+
+ Option::OptionClass Kind;
+ unsigned GroupID;
+ unsigned AliasID;
+ unsigned Param;
+};
+
+// Ordering on Info. The ordering is *almost* lexicographic, with two
+// exceptions. First, '\0' comes at the end of the alphabet instead of
+// the beginning (thus options precede any other options which prefix
+// them). Second, for options with the same name, the less permissive
+// version should come first; a Flag option should precede a Joined
+// option, for example.
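+//
+// Concretely, StrCmpOptionName("-foo", "-f") returns -1: since '\0' sorts
+// last, "-foo" is ordered before its prefix "-f".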
+
+static int StrCmpOptionName(const char *A, const char *B) {
+ char a = *A, b = *B;
+ while (a == b) {
+ if (a == '\0')
+ return 0;
+
+ a = *++A;
+ b = *++B;
+ }
+
+ if (a == '\0') // A is a prefix of B.
+ return 1;
+ if (b == '\0') // B is a prefix of A.
+ return -1;
+
+ // Otherwise lexicographic.
+ return (a < b) ? -1 : 1;
+}
+
+static inline bool operator<(const Info &A, const Info &B) {
+ if (&A == &B)
+ return false;
+
+ if (int N = StrCmpOptionName(A.Name, B.Name))
+ return N == -1;
+
+ // Names are the same, check that classes are in order; exactly one
+ // should be joined, and it should succeed the other.
+ assert(((A.Kind == Option::JoinedClass) ^ (B.Kind == Option::JoinedClass)) &&
+ "Unexpected classes for options with same name.");
+ return B.Kind == Option::JoinedClass;
+}
+
+//
+
+static Info OptionInfos[] = {
+ // The InputOption info
+ { "<input>", "d", 0, 0, Option::InputClass, OPT_INVALID, OPT_INVALID, 0 },
+ // The UnknownOption info
+ { "<unknown>", "", 0, 0, Option::UnknownClass, OPT_INVALID, OPT_INVALID, 0 },
+
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { NAME, FLAGS, HELPTEXT, METAVAR, \
+ Option::KIND##Class, OPT_##GROUP, OPT_##ALIAS, PARAM },
+#include "clang/Driver/Options.def"
+};
+static const unsigned numOptions = sizeof(OptionInfos) / sizeof(OptionInfos[0]);
+
+static Info &getInfo(unsigned id) {
+ assert(id > 0 && id - 1 < numOptions && "Invalid Option ID.");
+ return OptionInfos[id - 1];
+}
+
+OptTable::OptTable() : Options(new Option*[numOptions]()) {
+ // Find start of normal options.
+ FirstSearchableOption = 0;
+ for (unsigned i = OPT_UNKNOWN + 1; i < LastOption; ++i) {
+ if (getInfo(i).Kind != Option::GroupClass) {
+ FirstSearchableOption = i;
+ break;
+ }
+ }
+ assert(FirstSearchableOption != 0 && "No searchable options?");
+
+#ifndef NDEBUG
+ // Check that everything after the first searchable option is a
+ // regular option class.
+ for (unsigned i = FirstSearchableOption; i < LastOption; ++i) {
+ Option::OptionClass Kind = getInfo(i).Kind;
+ assert((Kind != Option::InputClass && Kind != Option::UnknownClass &&
+ Kind != Option::GroupClass) &&
+ "Special options should be defined first!");
+ }
+
+ // Check that options are in order.
+ for (unsigned i = FirstSearchableOption + 1; i < LastOption; ++i) {
+ if (!(getInfo(i - 1) < getInfo(i))) {
+ getOption((options::ID) (i - 1))->dump();
+ getOption((options::ID) i)->dump();
+ assert(0 && "Options are not in order!");
+ }
+ }
+#endif
+}
+
+OptTable::~OptTable() {
+ for (unsigned i = 0; i < numOptions; ++i)
+ delete Options[i];
+ delete[] Options;
+}
+
+unsigned OptTable::getNumOptions() const {
+ return numOptions;
+}
+
+const char *OptTable::getOptionName(options::ID id) const {
+ return getInfo(id).Name;
+}
+
+unsigned OptTable::getOptionKind(options::ID id) const {
+ return getInfo(id).Kind;
+}
+
+const char *OptTable::getOptionHelpText(options::ID id) const {
+ return getInfo(id).HelpText;
+}
+
+const char *OptTable::getOptionMetaVar(options::ID id) const {
+ return getInfo(id).MetaVar;
+}
+
+const Option *OptTable::getOption(options::ID id) const {
+ if (id == OPT_INVALID)
+ return 0;
+
+ assert((unsigned) (id - 1) < numOptions && "Invalid ID.");
+
+ Option *&Entry = Options[id - 1];
+ if (!Entry)
+ Entry = constructOption(id);
+
+ return Entry;
+}
+
+Option *OptTable::constructOption(options::ID id) const {
+ Info &info = getInfo(id);
+ const OptionGroup *Group =
+ cast_or_null<OptionGroup>(getOption((options::ID) info.GroupID));
+ const Option *Alias = getOption((options::ID) info.AliasID);
+
+ Option *Opt = 0;
+ switch (info.Kind) {
+ case Option::InputClass:
+ Opt = new InputOption(); break;
+ case Option::UnknownClass:
+ Opt = new UnknownOption(); break;
+ case Option::GroupClass:
+ Opt = new OptionGroup(id, info.Name, Group); break;
+ case Option::FlagClass:
+ Opt = new FlagOption(id, info.Name, Group, Alias); break;
+ case Option::JoinedClass:
+ Opt = new JoinedOption(id, info.Name, Group, Alias); break;
+ case Option::SeparateClass:
+ Opt = new SeparateOption(id, info.Name, Group, Alias); break;
+ case Option::CommaJoinedClass:
+ Opt = new CommaJoinedOption(id, info.Name, Group, Alias); break;
+ case Option::MultiArgClass:
+ Opt = new MultiArgOption(id, info.Name, Group, Alias, info.Param); break;
+ case Option::JoinedOrSeparateClass:
+ Opt = new JoinedOrSeparateOption(id, info.Name, Group, Alias); break;
+ case Option::JoinedAndSeparateClass:
+ Opt = new JoinedAndSeparateOption(id, info.Name, Group, Alias); break;
+ }
+
+ for (const char *s = info.Flags; *s; ++s) {
+ switch (*s) {
+ default: assert(0 && "Invalid option flag.");
+ case 'J':
+ assert(info.Kind == Option::SeparateClass && "Invalid option.");
+ Opt->setForceJoinedRender(true); break;
+ case 'S':
+ assert(info.Kind == Option::JoinedClass && "Invalid option.");
+ Opt->setForceSeparateRender(true); break;
+ case 'd': Opt->setDriverOption(true); break;
+ case 'i': Opt->setNoOptAsInput(true); break;
+ case 'l': Opt->setLinkerInput(true); break;
+ case 'q': Opt->setNoArgumentUnused(true); break;
+ case 'u': Opt->setUnsupported(true); break;
+ }
+ }
+
+ return Opt;
+}
+
+// Support lower_bound between info and an option name.
+static inline bool operator<(struct Info &I, const char *Name) {
+ return StrCmpOptionName(I.Name, Name) == -1;
+}
+static inline bool operator<(const char *Name, struct Info &I) {
+ return StrCmpOptionName(Name, I.Name) == -1;
+}
+
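+// For example, assuming the option table defines "-I" as a joined option,
+// parsing "-I/usr/include" finds that entry via the prefix search below and
+// accepts it with the value "/usr/include"; a string that matches no entry
+// comes back as an OPT_UNKNOWN arg.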
+Arg *OptTable::ParseOneArg(const InputArgList &Args, unsigned &Index) const {
+ unsigned Prev = Index;
+ const char *Str = Args.getArgString(Index);
+
+ // Anything that doesn't start with '-' is an input, as is '-' itself.
+ if (Str[0] != '-' || Str[1] == '\0')
+ return new PositionalArg(getOption(OPT_INPUT), Index++);
+
+ struct Info *Start = OptionInfos + FirstSearchableOption - 1;
+ struct Info *End = OptionInfos + LastOption - 1;
+
+  // Find the first option that could be a prefix of Str.
+ Start = std::lower_bound(Start, End, Str);
+
+ // Options are stored in sorted order, with '\0' at the end of the
+ // alphabet. Since the only options which can accept a string must
+ // prefix it, we iteratively search for the next option which could
+ // be a prefix.
+ //
+ // FIXME: This is searching much more than necessary, but I am
+ // blanking on the simplest way to make it fast. We can solve this
+ // problem when we move to TableGen.
+ for (; Start != End; ++Start) {
+ // Scan for first option which is a proper prefix.
+ for (; Start != End; ++Start)
+ if (memcmp(Str, Start->Name, strlen(Start->Name)) == 0)
+ break;
+ if (Start == End)
+ break;
+
+ // See if this option matches.
+ options::ID id = (options::ID) (Start - OptionInfos + 1);
+ if (Arg *A = getOption(id)->accept(Args, Index))
+ return A;
+
+ // Otherwise, see if this argument was missing values.
+ if (Prev != Index)
+ return 0;
+ }
+
+ return new PositionalArg(getOption(OPT_UNKNOWN), Index++);
+}
+
diff --git a/lib/Driver/Option.cpp b/lib/Driver/Option.cpp
new file mode 100644
index 0000000..cad2bbf
--- /dev/null
+++ b/lib/Driver/Option.cpp
@@ -0,0 +1,250 @@
+//===--- Option.cpp - Abstract Driver Options ---------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Option.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <algorithm>
+using namespace clang::driver;
+
+Option::Option(OptionClass _Kind, options::ID _ID, const char *_Name,
+ const OptionGroup *_Group, const Option *_Alias)
+ : Kind(_Kind), ID(_ID), Name(_Name), Group(_Group), Alias(_Alias),
+ Unsupported(false), LinkerInput(false), NoOptAsInput(false),
+ ForceSeparateRender(false), ForceJoinedRender(false),
+ DriverOption(false), NoArgumentUnused(false)
+{
+
+ // Multi-level aliases are not supported, and alias options cannot
+ // have groups. This just simplifies option tracking, it is not an
+ // inherent limitation.
+ assert((!Alias || (!Alias->Alias && !Group)) &&
+ "Multi-level aliases and aliases with groups are unsupported.");
+}
+
+Option::~Option() {
+}
+
+void Option::dump() const {
+ llvm::errs() << "<";
+ switch (Kind) {
+ default:
+ assert(0 && "Invalid kind");
+#define P(N) case N: llvm::errs() << #N; break
+ P(GroupClass);
+ P(InputClass);
+ P(UnknownClass);
+ P(FlagClass);
+ P(JoinedClass);
+ P(SeparateClass);
+ P(CommaJoinedClass);
+ P(MultiArgClass);
+ P(JoinedOrSeparateClass);
+ P(JoinedAndSeparateClass);
+#undef P
+ }
+
+ llvm::errs() << " Name:\"" << Name << '"';
+
+ if (Group) {
+ llvm::errs() << " Group:";
+ Group->dump();
+ }
+
+ if (Alias) {
+ llvm::errs() << " Alias:";
+ Alias->dump();
+ }
+
+ if (const MultiArgOption *MOA = dyn_cast<MultiArgOption>(this))
+ llvm::errs() << " NumArgs:" << MOA->getNumArgs();
+
+ llvm::errs() << ">\n";
+}
+
+bool Option::matches(const Option *Opt) const {
+ // Aliases are never considered in matching.
+ if (Opt->getAlias())
+ return matches(Opt->getAlias());
+ if (Alias)
+ return Alias->matches(Opt);
+
+ if (this == Opt)
+ return true;
+
+ if (Group)
+ return Group->matches(Opt);
+ return false;
+}
+
+bool Option::matches(options::ID Id) const {
+ // FIXME: Decide what to do here; we should either pull out the
+ // handling of alias on the option for Id from the other matches, or
+ // find some other solution (which hopefully doesn't require using
+ // the option table).
+ if (Alias)
+ return Alias->matches(Id);
+
+ if (ID == Id)
+ return true;
+
+ if (Group)
+ return Group->matches(Id);
+ return false;
+}
+
+OptionGroup::OptionGroup(options::ID ID, const char *Name,
+ const OptionGroup *Group)
+ : Option(Option::GroupClass, ID, Name, Group, 0) {
+}
+
+Arg *OptionGroup::accept(const InputArgList &Args, unsigned &Index) const {
+ assert(0 && "accept() should never be called on an OptionGroup");
+ return 0;
+}
+
+InputOption::InputOption()
+ : Option(Option::InputClass, options::OPT_INPUT, "<input>", 0, 0) {
+}
+
+Arg *InputOption::accept(const InputArgList &Args, unsigned &Index) const {
+ assert(0 && "accept() should never be called on an InputOption");
+ return 0;
+}
+
+UnknownOption::UnknownOption()
+ : Option(Option::UnknownClass, options::OPT_UNKNOWN, "<unknown>", 0, 0) {
+}
+
+Arg *UnknownOption::accept(const InputArgList &Args, unsigned &Index) const {
+ assert(0 && "accept() should never be called on an UnknownOption");
+ return 0;
+}
+
+FlagOption::FlagOption(options::ID ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::FlagClass, ID, Name, Group, Alias) {
+}
+
+Arg *FlagOption::accept(const InputArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
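+  //
+  // The caller only tries options whose name is a prefix of the argument
+  // string, so equal lengths imply an exact match.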
+ if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ return 0;
+
+ return new FlagArg(this, Index++);
+}
+
+JoinedOption::JoinedOption(options::ID ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::JoinedClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedOption::accept(const InputArgList &Args, unsigned &Index) const {
+ // Always matches.
+ return new JoinedArg(this, Index++);
+}
+
+CommaJoinedOption::CommaJoinedOption(options::ID ID, const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::CommaJoinedClass, ID, Name, Group, Alias) {
+}
+
+Arg *CommaJoinedOption::accept(const InputArgList &Args,
+ unsigned &Index) const {
+ // Always matches. We count the commas now so we can answer
+ // getNumValues easily.
+
+ // Get the suffix string.
+ // FIXME: Avoid strlen, and move to helper method?
+ const char *Suffix = Args.getArgString(Index) + strlen(getName());
+ return new CommaJoinedArg(this, Index++, Suffix);
+}
+
+SeparateOption::SeparateOption(options::ID ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::SeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *SeparateOption::accept(const InputArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new SeparateArg(this, Index - 2, 1);
+}
+
+MultiArgOption::MultiArgOption(options::ID ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias,
+ unsigned _NumArgs)
+ : Option(Option::MultiArgClass, ID, Name, Group, Alias), NumArgs(_NumArgs) {
+ assert(NumArgs > 1 && "Invalid MultiArgOption!");
+}
+
+Arg *MultiArgOption::accept(const InputArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 1 + NumArgs;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new SeparateArg(this, Index - 1 - NumArgs, NumArgs);
+}
+
+JoinedOrSeparateOption::JoinedOrSeparateOption(options::ID ID, const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::JoinedOrSeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedOrSeparateOption::accept(const InputArgList &Args,
+ unsigned &Index) const {
+ // If this is not an exact match, it is a joined arg.
+ // FIXME: Avoid strlen.
+ if (strlen(getName()) != strlen(Args.getArgString(Index)))
+ return new JoinedArg(this, Index++);
+
+ // Otherwise it must be separate.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new SeparateArg(this, Index - 2, 1);
+}
+
+JoinedAndSeparateOption::JoinedAndSeparateOption(options::ID ID,
+ const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::JoinedAndSeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedAndSeparateOption::accept(const InputArgList &Args,
+ unsigned &Index) const {
+ // Always matches.
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new JoinedAndSeparateArg(this, Index - 2);
+}
+
diff --git a/lib/Driver/Phases.cpp b/lib/Driver/Phases.cpp
new file mode 100644
index 0000000..df4cdee
--- /dev/null
+++ b/lib/Driver/Phases.cpp
@@ -0,0 +1,27 @@
+//===--- Phases.cpp - Transformations on Driver Types -------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Phases.h"
+
+#include <cassert>
+
+using namespace clang::driver;
+
+const char *phases::getPhaseName(ID Id) {
+ switch (Id) {
+ case Preprocess: return "preprocessor";
+ case Precompile: return "precompiler";
+ case Compile: return "compiler";
+ case Assemble: return "assembler";
+ case Link: return "linker";
+ }
+
+ assert(0 && "Invalid phase id.");
+ return 0;
+}
diff --git a/lib/Driver/Tool.cpp b/lib/Driver/Tool.cpp
new file mode 100644
index 0000000..6f6589a
--- /dev/null
+++ b/lib/Driver/Tool.cpp
@@ -0,0 +1,19 @@
+//===--- Tool.cpp - Compilation Tools -----------------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Tool.h"
+
+using namespace clang::driver;
+
+Tool::Tool(const char *_Name, const ToolChain &TC) : Name(_Name),
+ TheToolChain(TC) {
+}
+
+Tool::~Tool() {
+}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
new file mode 100644
index 0000000..20ed31b
--- /dev/null
+++ b/lib/Driver/ToolChain.cpp
@@ -0,0 +1,35 @@
+//===--- ToolChain.cpp - Collections of tools for one platform ----------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/ToolChain.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/HostInfo.h"
+
+using namespace clang::driver;
+
+ToolChain::ToolChain(const HostInfo &_Host, const llvm::Triple &_Triple)
+ : Host(_Host), Triple(_Triple) {
+}
+
+ToolChain::~ToolChain() {
+}
+
+llvm::sys::Path ToolChain::GetFilePath(const Compilation &C,
+ const char *Name) const {
+  return Host.getDriver().GetFilePath(Name, *this);
+}
+
+llvm::sys::Path ToolChain::GetProgramPath(const Compilation &C,
+ const char *Name,
+ bool WantFile) const {
+ return Host.getDriver().GetProgramPath(Name, *this, WantFile);
+}
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
new file mode 100644
index 0000000..436d343
--- /dev/null
+++ b/lib/Driver/ToolChains.cpp
@@ -0,0 +1,475 @@
+//===--- ToolChains.cpp - ToolChain Implementations ---------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/Option.h"
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+
+#include <cstdlib> // ::getenv
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+
+/// Darwin_X86 - Darwin tool chain for i386 and x86_64.
+
+Darwin_X86::Darwin_X86(const HostInfo &Host, const llvm::Triple& Triple,
+ const unsigned (&_DarwinVersion)[3],
+ const unsigned (&_GCCVersion)[3])
+ : ToolChain(Host, Triple) {
+ DarwinVersion[0] = _DarwinVersion[0];
+ DarwinVersion[1] = _DarwinVersion[1];
+ DarwinVersion[2] = _DarwinVersion[2];
+ GCCVersion[0] = _GCCVersion[0];
+ GCCVersion[1] = _GCCVersion[1];
+ GCCVersion[2] = _GCCVersion[2];
+
+ llvm::raw_string_ostream(MacosxVersionMin)
+ << "10." << DarwinVersion[0] - 4 << '.' << DarwinVersion[1];
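+  // For example, a darwin9.6 host gets a default macosx-version-min of 10.5.6.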
+
+ ToolChainDir = "i686-apple-darwin";
+ ToolChainDir += llvm::utostr(DarwinVersion[0]);
+ ToolChainDir += "/";
+ ToolChainDir += llvm::utostr(GCCVersion[0]);
+ ToolChainDir += '.';
+ ToolChainDir += llvm::utostr(GCCVersion[1]);
+ ToolChainDir += '.';
+ ToolChainDir += llvm::utostr(GCCVersion[2]);
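+  // For example, darwin10 with gcc 4.2.1 yields "i686-apple-darwin10/4.2.1".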
+
+ std::string Path;
+ if (getArchName() == "x86_64") {
+ Path = getHost().getDriver().Dir;
+ Path += "/../lib/gcc/";
+ Path += getToolChainDir();
+ Path += "/x86_64";
+ getFilePaths().push_back(Path);
+
+ Path = "/usr/lib/gcc/";
+ Path += getToolChainDir();
+ Path += "/x86_64";
+ getFilePaths().push_back(Path);
+ }
+
+ Path = getHost().getDriver().Dir;
+ Path += "/../lib/gcc/";
+ Path += getToolChainDir();
+ getFilePaths().push_back(Path);
+
+ Path = "/usr/lib/gcc/";
+ Path += getToolChainDir();
+ getFilePaths().push_back(Path);
+
+ Path = getHost().getDriver().Dir;
+ Path += "/../libexec/gcc/";
+ Path += getToolChainDir();
+ getProgramPaths().push_back(Path);
+
+ Path = "/usr/libexec/gcc/";
+ Path += getToolChainDir();
+ getProgramPaths().push_back(Path);
+
+ Path = getHost().getDriver().Dir;
+ Path += "/../libexec";
+ getProgramPaths().push_back(Path);
+
+ getProgramPaths().push_back(getHost().getDriver().Dir);
+}
+
+Darwin_X86::~Darwin_X86() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+Tool &Darwin_X86::SelectTool(const Compilation &C,
+ const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getHost().getDriver().ShouldUseClangCompiler(C, JA, getArchName()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ assert(0 && "Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ T = new tools::darwin::Preprocess(*this); break;
+ case Action::AnalyzeJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::PrecompileJobClass:
+ case Action::CompileJobClass:
+ T = new tools::darwin::Compile(*this); break;
+ case Action::AssembleJobClass:
+ T = new tools::darwin::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::darwin::Link(*this, MacosxVersionMin.c_str()); break;
+ case Action::LipoJobClass:
+ T = new tools::darwin::Lipo(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+DerivedArgList *Darwin_X86::TranslateArgs(InputArgList &Args) const {
+ DerivedArgList *DAL = new DerivedArgList(Args, false);
+ const OptTable &Opts = getHost().getDriver().getOpts();
+
+ // FIXME: We really want to get out of the tool chain level argument
+ // translation business, as it makes the driver functionality much
+ // more opaque. For now, we follow gcc closely solely for the
+ // purpose of easily achieving feature parity & testability. Once we
+ // have something that works, we should reevaluate each translation
+ // and try to push it down into tool specific logic.
+
+ Arg *OSXVersion =
+ Args.getLastArg(options::OPT_mmacosx_version_min_EQ, false);
+ Arg *iPhoneVersion =
+ Args.getLastArg(options::OPT_miphoneos_version_min_EQ, false);
+ if (OSXVersion && iPhoneVersion) {
+ getHost().getDriver().Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << OSXVersion->getAsString(Args)
+ << iPhoneVersion->getAsString(Args);
+ } else if (!OSXVersion && !iPhoneVersion) {
+    // Choose the default version based on the arch.
+ //
+ // FIXME: This will need to be fixed when we merge in arm support.
+
+ // Look for MACOSX_DEPLOYMENT_TARGET, otherwise use the version
+ // from the host.
+ const char *Version = ::getenv("MACOSX_DEPLOYMENT_TARGET");
+ if (!Version)
+ Version = MacosxVersionMin.c_str();
+ const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ DAL->append(DAL->MakeJoinedArg(0, O, Version));
+ }
+
+ for (ArgList::iterator it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+
+ if (A->getOption().matches(options::OPT_Xarch__)) {
+ // FIXME: Canonicalize name.
+ if (getArchName() != A->getValue(Args, 0))
+ continue;
+
+ // FIXME: The arg is leaked here, and we should have a nicer
+ // interface for this.
+ unsigned Prev, Index = Prev = A->getIndex() + 1;
+ Arg *XarchArg = Opts.ParseOneArg(Args, Index);
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1 ||
+ XarchArg->getOption().isDriverOption()) {
+ getHost().getDriver().Diag(clang::diag::err_drv_invalid_Xarch_argument)
+ << A->getAsString(Args);
+ continue;
+ }
+
+ XarchArg->setBaseArg(A);
+ A = XarchArg;
+ }
+
+    // Sob. This is strictly gcc compatible for the time being. Apple
+ // gcc translates options twice, which means that self-expanding
+ // options add duplicates.
+ options::ID id = A->getOption().getId();
+ switch (id) {
+ default:
+ DAL->append(A);
+ break;
+
+ case options::OPT_mkernel:
+ case options::OPT_fapple_kext:
+ DAL->append(A);
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
+ break;
+
+ case options::OPT_dependency_file:
+ DAL->append(DAL->MakeSeparateArg(A, Opts.getOption(options::OPT_MF),
+ A->getValue(Args)));
+ break;
+
+ case options::OPT_gfull:
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag)));
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols)));
+ break;
+
+ case options::OPT_gused:
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag)));
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_feliminate_unused_debug_symbols)));
+ break;
+
+ case options::OPT_fterminated_vtables:
+ case options::OPT_findirect_virtual_calls:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_fapple_kext)));
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static)));
+ break;
+
+ case options::OPT_shared:
+ DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_dynamiclib)));
+ break;
+
+ case options::OPT_fconstant_cfstrings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mconstant_cfstrings)));
+ break;
+
+ case options::OPT_fno_constant_cfstrings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mno_constant_cfstrings)));
+ break;
+
+ case options::OPT_Wnonportable_cfstrings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mwarn_nonportable_cfstrings)));
+ break;
+
+ case options::OPT_Wno_nonportable_cfstrings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings)));
+ break;
+
+ case options::OPT_fpascal_strings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mpascal_strings)));
+ break;
+
+ case options::OPT_fno_pascal_strings:
+ DAL->append(DAL->MakeFlagArg(A,
+ Opts.getOption(options::OPT_mno_pascal_strings)));
+ break;
+ }
+ }
+
+ // FIXME: Actually, gcc always adds this, but it is filtered for
+ // duplicates somewhere. This also changes the order of things, so
+ // look it up.
+ if (getArchName() == "x86_64")
+ if (!Args.hasArg(options::OPT_m64, false))
+ DAL->append(DAL->MakeFlagArg(0, Opts.getOption(options::OPT_m64)));
+
+ if (!Args.hasArg(options::OPT_mtune_EQ, false))
+ DAL->append(DAL->MakeJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ),
+ "core2"));
+
+ return DAL;
+}
+
+bool Darwin_X86::IsMathErrnoDefault() const {
+ return false;
+}
+
+bool Darwin_X86::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Darwin_X86::GetDefaultRelocationModel() const {
+ return "pic";
+}
+
+const char *Darwin_X86::GetForcedPicModel() const {
+ if (getArchName() == "x86_64")
+ return "pic";
+ return 0;
+}
+
+/// Generic_GCC - A tool chain using the 'gcc' command to perform
+/// all subcommands; this relies on gcc translating the majority of
+/// command line options.
+
+Generic_GCC::Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple)
+ : ToolChain(Host, Triple)
+{
+ std::string Path(getHost().getDriver().Dir);
+ Path += "/../libexec";
+ getProgramPaths().push_back(Path);
+
+ getProgramPaths().push_back(getHost().getDriver().Dir);
+}
+
+Generic_GCC::~Generic_GCC() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+Tool &Generic_GCC::SelectTool(const Compilation &C,
+ const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getHost().getDriver().ShouldUseClangCompiler(C, JA, getArchName()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ assert(0 && "Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ T = new tools::gcc::Preprocess(*this); break;
+ case Action::PrecompileJobClass:
+ T = new tools::gcc::Precompile(*this); break;
+ case Action::AnalyzeJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::CompileJobClass:
+ T = new tools::gcc::Compile(*this); break;
+ case Action::AssembleJobClass:
+ T = new tools::gcc::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::gcc::Link(*this); break;
+
+ // This is a bit ungeneric, but the only platform using a driver
+ // driver is Darwin.
+ case Action::LipoJobClass:
+ T = new tools::darwin::Lipo(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+bool Generic_GCC::IsMathErrnoDefault() const {
+ return true;
+}
+
+bool Generic_GCC::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Generic_GCC::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Generic_GCC::GetForcedPicModel() const {
+ return 0;
+}
+
+DerivedArgList *Generic_GCC::TranslateArgs(InputArgList &Args) const {
+ return new DerivedArgList(Args, true);
+}
+
+/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
+
+FreeBSD::FreeBSD(const HostInfo &Host, const llvm::Triple& Triple, bool Lib32)
+ : Generic_GCC(Host, Triple) {
+ if (Lib32) {
+ getFilePaths().push_back(getHost().getDriver().Dir + "/../lib32");
+ getFilePaths().push_back("/usr/lib32");
+ } else {
+ getFilePaths().push_back(getHost().getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ }
+}
+
+Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getHost().getDriver().ShouldUseClangCompiler(C, JA, getArchName()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::freebsd::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::freebsd::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA);
+ }
+ }
+
+ return *T;
+}
+
+/// Linux toolchain (very bare-bones at the moment).
+
+Linux::Linux(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_GCC(Host, Triple) {
+ getFilePaths().push_back(getHost().getDriver().Dir + "/../lib/clang/1.0/");
+ getFilePaths().push_back("/lib/");
+ getFilePaths().push_back("/usr/lib/");
+ // FIXME: Figure out some way to get gcc's libdir
+ // (e.g. /usr/lib/gcc/i486-linux-gnu/4.3/ for Ubuntu 32-bit); we need
+ // crtbegin.o/crtend.o/etc., and want static versions of various
+ // libraries. If we had our own crtbegin.o/crtend.o/etc, we could probably
+ // get away with using shared versions in /usr/lib, though.
+ // We could fall back to the approach we used for includes (a massive
+ // list), but that's messy at best.
+}
+
+/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
+
+DragonFly::DragonFly(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_GCC(Host, Triple) {
+
+ // Path mangling to find libexec
+ std::string Path(getHost().getDriver().Dir);
+
+ Path += "/../libexec";
+ getProgramPaths().push_back(Path);
+ getProgramPaths().push_back(getHost().getDriver().Dir);
+
+ getFilePaths().push_back(getHost().getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back("/usr/lib/gcc41");
+}
+
+Tool &DragonFly::SelectTool(const Compilation &C, const JobAction &JA) const {
+ Action::ActionClass Key;
+ if (getHost().getDriver().ShouldUseClangCompiler(C, JA, getArchName()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::dragonfly::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::dragonfly::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA);
+ }
+ }
+
+ return *T;
+}
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
new file mode 100644
index 0000000..3ecd171
--- /dev/null
+++ b/lib/Driver/ToolChains.h
@@ -0,0 +1,134 @@
+//===--- ToolChains.h - ToolChain Implementations ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_TOOLCHAINS_H_
+#define CLANG_LIB_DRIVER_TOOLCHAINS_H_
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/ToolChain.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
+
+#include "Tools.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+ /// Generic_GCC - A tool chain using the 'gcc' command to perform
+ /// all subcommands; this relies on gcc translating the majority of
+ /// command line options.
+class VISIBILITY_HIDDEN Generic_GCC : public ToolChain {
+protected:
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+public:
+ Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple);
+ ~Generic_GCC();
+
+ virtual DerivedArgList *TranslateArgs(InputArgList &Args) const;
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+
+ virtual bool IsMathErrnoDefault() const;
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+};
+
+  /// Darwin_X86 - Darwin tool chain for i386 and x86_64.
+class VISIBILITY_HIDDEN Darwin_X86 : public ToolChain {
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+ /// Darwin version of tool chain.
+ unsigned DarwinVersion[3];
+
+ /// GCC version to use.
+ unsigned GCCVersion[3];
+
+ /// The directory suffix for this tool chain.
+ std::string ToolChainDir;
+
+ /// The default macosx-version-min of this tool chain; empty until
+ /// initialized.
+ mutable std::string MacosxVersionMin;
+
+ const char *getMacosxVersionMin() const;
+
+public:
+ Darwin_X86(const HostInfo &Host, const llvm::Triple& Triple,
+ const unsigned (&DarwinVersion)[3],
+ const unsigned (&GCCVersion)[3]);
+ ~Darwin_X86();
+
+ void getDarwinVersion(unsigned (&Res)[3]) const {
+ Res[0] = DarwinVersion[0];
+ Res[1] = DarwinVersion[1];
+ Res[2] = DarwinVersion[2];
+ }
+
+ void getMacosxVersion(unsigned (&Res)[3]) const {
+ Res[0] = 10;
+ Res[1] = DarwinVersion[0] - 4;
+ Res[2] = DarwinVersion[1];
+ }
+
+ const char *getMacosxVersionStr() const {
+ return MacosxVersionMin.c_str();
+ }
+
+ const std::string &getToolChainDir() const {
+ return ToolChainDir;
+ }
+
+ virtual DerivedArgList *TranslateArgs(InputArgList &Args) const;
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+
+ virtual bool IsMathErrnoDefault() const;
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+};
+
+ /// Darwin_GCC - Generic Darwin tool chain using gcc.
+class VISIBILITY_HIDDEN Darwin_GCC : public Generic_GCC {
+public:
+ Darwin_GCC(const HostInfo &Host, const llvm::Triple& Triple)
+ : Generic_GCC(Host, Triple) {}
+
+ virtual const char *GetDefaultRelocationModel() const { return "pic"; }
+};
+
+class VISIBILITY_HIDDEN FreeBSD : public Generic_GCC {
+public:
+ FreeBSD(const HostInfo &Host, const llvm::Triple& Triple, bool Lib32);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+};
+
+class VISIBILITY_HIDDEN DragonFly : public Generic_GCC {
+public:
+ DragonFly(const HostInfo &Host, const llvm::Triple& Triple);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
+};
+
+class VISIBILITY_HIDDEN Linux : public Generic_GCC {
+public:
+ Linux(const HostInfo &Host, const llvm::Triple& Triple);
+};
+
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
new file mode 100644
index 0000000..abfabbb
--- /dev/null
+++ b/lib/Driver/Tools.cpp
@@ -0,0 +1,2033 @@
+//===--- Tools.cpp - Tools Implementations ------------------------------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Tools.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h" // FIXME: Remove?
+#include "clang/Driver/DriverDiagnostic.h" // FIXME: Remove?
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/HostInfo.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Util.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "InputInfo.h"
+#include "ToolChains.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+
+static const char *MakeFormattedString(const ArgList &Args,
+ const llvm::format_object_base &Fmt) {
+ std::string Str;
+ llvm::raw_string_ostream(Str) << Fmt;
+ return Args.MakeArgString(Str.c_str());
+}
+
+void Clang::AddPreprocessingOptions(const Driver &D,
+ const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs) const {
+ // Handle dependency file generation.
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_M)) ||
+ (A = Args.getLastArg(options::OPT_MM)) ||
+ (A = Args.getLastArg(options::OPT_MD)) ||
+ (A = Args.getLastArg(options::OPT_MMD))) {
+ // Determine the output location.
+ const char *DepFile;
+ if (Output.getType() == types::TY_Dependencies) {
+ if (Output.isPipe())
+ DepFile = "-";
+ else
+ DepFile = Output.getFilename();
+ } else if (Arg *MF = Args.getLastArg(options::OPT_MF)) {
+ DepFile = MF->getValue(Args);
+ } else if (A->getOption().getId() == options::OPT_M ||
+ A->getOption().getId() == options::OPT_MM) {
+ DepFile = "-";
+ } else {
+ DepFile = darwin::CC1::getDependencyFileName(Args, Inputs);
+ }
+ CmdArgs.push_back("-dependency-file");
+ CmdArgs.push_back(DepFile);
+
+ // Add an -MT option if the user didn't specify their own.
+ // FIXME: This should use -MQ, when we support it.
+ if (!Args.hasArg(options::OPT_MT) && !Args.hasArg(options::OPT_MQ)) {
+ const char *DepTarget;
+
+ // If user provided -o, that is the dependency target, except
+ // when we are only generating a dependency file.
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ if (OutputOpt && Output.getType() != types::TY_Dependencies) {
+ DepTarget = OutputOpt->getValue(Args);
+ } else {
+ // Otherwise derive from the base input.
+ //
+ // FIXME: This should use the computed output file location.
+ llvm::sys::Path P(Inputs[0].getBaseInput());
+
+ P.eraseSuffix();
+ P.appendSuffix("o");
+ DepTarget = Args.MakeArgString(P.getLast().c_str());
+ }
+
+ CmdArgs.push_back("-MT");
+ CmdArgs.push_back(DepTarget);
+ }
+
+ if (A->getOption().getId() == options::OPT_M ||
+ A->getOption().getId() == options::OPT_MD)
+ CmdArgs.push_back("-sys-header-deps");
+ }
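+ // Illustrative example of the handling above: 'clang -c -MD -o foo.o
+ // foo.c' passes '-dependency-file foo.d', '-MT foo.o', and
+ // '-sys-header-deps' on to clang-cc.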
+
+ Args.AddLastArg(CmdArgs, options::OPT_MP);
+ Args.AddAllArgs(CmdArgs, options::OPT_MT);
+
+ // FIXME: Use iterator.
+
+ // Add -i* options, and automatically translate to
+ // -include-pch/-include-pth for transparent PCH support. It's
+ // wonky, but we also look for .gch files so that we can slot
+ // seamlessly into a build system already set up to generate .gch
+ // files.
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (!A->getOption().matches(options::OPT_clang_i_Group))
+ continue;
+
+ if (A->getOption().matches(options::OPT_include)) {
+ bool FoundPTH = false;
+ bool FoundPCH = false;
+ llvm::sys::Path P(A->getValue(Args));
+ if (D.CCCUsePCH) {
+ P.appendSuffix("pch");
+ if (P.exists())
+ FoundPCH = true;
+ else
+ P.eraseSuffix();
+ }
+
+ if (!FoundPCH) {
+ P.appendSuffix("pth");
+ if (P.exists())
+ FoundPTH = true;
+ else
+ P.eraseSuffix();
+ }
+
+ if (!FoundPCH && !FoundPTH) {
+ P.appendSuffix("gch");
+ if (P.exists()) {
+ FoundPCH = D.CCCUsePCH;
+ FoundPTH = !D.CCCUsePCH;
+ }
+ else
+ P.eraseSuffix();
+ }
+
+ if (FoundPCH || FoundPTH) {
+ A->claim();
+ if (D.CCCUsePCH)
+ CmdArgs.push_back("-include-pch");
+ else
+ CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back(Args.MakeArgString(P.c_str()));
+ continue;
+ }
+ }
+
+ // Not translated, render as usual.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
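+ // Illustrative example of the translation above: for '-include foo.h'
+ // the driver probes foo.h.pch (when PCH is in use), then foo.h.pth,
+ // then foo.h.gch; if one exists the option is rewritten to
+ // '-include-pch <file>' (or '-include-pth' under PTH), otherwise
+ // '-include foo.h' is passed through unchanged.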
+
+ Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
+ Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F);
+
+ // Add -Wp, and -Xpreprocessor if using the preprocessor.
+
+ // FIXME: There is a very unfortunate problem here: some troubled
+ // souls abuse -Wp, to pass preprocessor options in gcc syntax. To
+ // really support that we would have to parse and then translate
+ // those options. :(
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA,
+ options::OPT_Xpreprocessor);
+}
+
+void Clang::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+
+ CmdArgs.push_back("-triple");
+ const char *TripleStr =
+ Args.MakeArgString(getToolChain().getTripleString().c_str());
+ CmdArgs.push_back(TripleStr);
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ assert(JA.getType() == types::TY_Plist && "Invalid output type.");
+ CmdArgs.push_back("-analyze");
+ } else if (isa<PreprocessJobAction>(JA)) {
+ if (Output.getType() == types::TY_Dependencies)
+ CmdArgs.push_back("-Eonly");
+ else
+ CmdArgs.push_back("-E");
+ } else if (isa<PrecompileJobAction>(JA)) {
+ if (D.CCCUsePCH)
+ CmdArgs.push_back("-emit-pch");
+ else
+ CmdArgs.push_back("-emit-pth");
+ } else {
+ assert(isa<CompileJobAction>(JA) && "Invalid action for clang tool.");
+
+ if (JA.getType() == types::TY_Nothing) {
+ CmdArgs.push_back("-fsyntax-only");
+ } else if (JA.getType() == types::TY_LLVMAsm) {
+ CmdArgs.push_back("-emit-llvm");
+ } else if (JA.getType() == types::TY_LLVMBC) {
+ CmdArgs.push_back("-emit-llvm-bc");
+ } else if (JA.getType() == types::TY_PP_Asm) {
+ CmdArgs.push_back("-S");
+ }
+ }
+
+ // The make clang go fast button.
+ CmdArgs.push_back("-disable-free");
+
+ // Set the main file name, so that debug info works even with
+ // -save-temps.
+ CmdArgs.push_back("-main-file-name");
+ CmdArgs.push_back(darwin::CC1::getBaseInputName(Args, Inputs));
+
+ // Some flags which affect the language (via preprocessor
+ // defines). See darwin::CC1::AddCPPArgs.
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-static-define");
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ // Add default argument set.
+ if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
+ CmdArgs.push_back("-warn-dead-stores");
+ CmdArgs.push_back("-checker-cfref");
+ CmdArgs.push_back("-analyzer-eagerly-assume");
+ CmdArgs.push_back("-warn-objc-methodsigs");
+ // Do not enable the missing -dealloc check.
+ // '-warn-objc-missing-dealloc',
+ CmdArgs.push_back("-warn-objc-unused-ivars");
+ }
+
+ // Set the output format. The default is plist, for (lame) historical
+ // reasons.
+ CmdArgs.push_back("-analyzer-output");
+ if (Arg *A = Args.getLastArg(options::OPT__analyzer_output))
+ CmdArgs.push_back(A->getValue(Args));
+ else
+ CmdArgs.push_back("plist");
+
+ // Add -Xanalyzer arguments when running as analyzer.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
+ }
+
+ // Perform argument translation for LLVM backend. This
+ // takes some care in reconciling with llvm-gcc. The
+ // issue is that llvm-gcc translates these options based on
+ // the values in cc1, whereas we are processing based on
+ // the driver arguments.
+ //
+ // FIXME: This is currently broken for -f flags when -fno
+ // variants are present.
+
+ // This comes from the default translation the driver + cc1
+ // would do to enable flag_pic.
+ //
+ // FIXME: Centralize this code.
+ bool PICEnabled = (Args.hasArg(options::OPT_fPIC) ||
+ Args.hasArg(options::OPT_fpic) ||
+ Args.hasArg(options::OPT_fPIE) ||
+ Args.hasArg(options::OPT_fpie));
+ bool PICDisabled = (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_static));
+ const char *Model = getToolChain().GetForcedPicModel();
+ if (!Model) {
+ if (Args.hasArg(options::OPT_mdynamic_no_pic))
+ Model = "dynamic-no-pic";
+ else if (PICDisabled)
+ Model = "static";
+ else if (PICEnabled)
+ Model = "pic";
+ else
+ Model = getToolChain().GetDefaultRelocationModel();
+ }
+ CmdArgs.push_back("--relocation-model");
+ CmdArgs.push_back(Model);
+
+ // Infer the __PIC__ value.
+ //
+ // FIXME: This isn't quite right on Darwin, which always sets
+ // __PIC__=2.
+ if (strcmp(Model, "pic") == 0 || strcmp(Model, "dynamic-no-pic") == 0) {
+ if (Args.hasArg(options::OPT_fPIC))
+ CmdArgs.push_back("-pic-level=2");
+ else
+ CmdArgs.push_back("-pic-level=1");
+ }
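+ // For example, assuming the tool chain does not force a model:
+ // '-fPIC' alone selects '--relocation-model pic' and '-pic-level=2',
+ // '-static' selects '--relocation-model static' (no -pic-level), and
+ // with no flags the tool chain default model is used.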
+
+ if (Args.hasArg(options::OPT_ftime_report))
+ CmdArgs.push_back("--time-passes");
+ // FIXME: Set --enable-unsafe-fp-math.
+ if (!Args.hasArg(options::OPT_fomit_frame_pointer))
+ CmdArgs.push_back("--disable-fp-elim");
+ if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
+ options::OPT_fno_zero_initialized_in_bss,
+ true))
+ CmdArgs.push_back("--nozero-initialized-in-bss");
+ if (Args.hasArg(options::OPT_dA) || Args.hasArg(options::OPT_fverbose_asm))
+ CmdArgs.push_back("--asm-verbose");
+ if (Args.hasArg(options::OPT_fdebug_pass_structure))
+ CmdArgs.push_back("--debug-pass=Structure");
+ if (Args.hasArg(options::OPT_fdebug_pass_arguments))
+ CmdArgs.push_back("--debug-pass=Arguments");
+ // FIXME: Set --inline-threshold=50 if (optimize_size || optimize < 3).
+ if (Args.hasFlag(options::OPT_funwind_tables,
+ options::OPT_fno_unwind_tables,
+ (getToolChain().IsUnwindTablesDefault() &&
+ !Args.hasArg(options::OPT_mkernel))))
+ CmdArgs.push_back("--unwind-tables=1");
+ else
+ CmdArgs.push_back("--unwind-tables=0");
+ if (!Args.hasFlag(options::OPT_mred_zone,
+ options::OPT_mno_red_zone,
+ true) ||
+ Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("--disable-red-zone");
+ if (Args.hasFlag(options::OPT_msoft_float,
+ options::OPT_mno_soft_float,
+ false))
+ CmdArgs.push_back("--soft-float");
+
+ // FIXME: Handle -mtune=.
+ (void) Args.hasArg(options::OPT_mtune_EQ);
+
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ // FIXME: We may need some translation here from the options gcc takes to
+ // names the LLVM backend understands?
+ CmdArgs.push_back("-mcpu");
+ CmdArgs.push_back(A->getValue(Args));
+ } else {
+ // Select default CPU.
+
+ // FIXME: Need target hooks.
+ if (memcmp(getToolChain().getOS().c_str(), "darwin", 6) == 0) {
+ if (getToolChain().getArchName() == "x86_64")
+ CmdArgs.push_back("--mcpu=core2");
+ else if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--mcpu=yonah");
+ } else {
+ if (getToolChain().getArchName() == "x86_64")
+ CmdArgs.push_back("--mcpu=x86-64");
+ else if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--mcpu=pentium4");
+ }
+ }
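+ // For example, with no -march= the defaults above give '--mcpu=core2'
+ // for x86_64 and '--mcpu=yonah' for i386 on Darwin ('--mcpu=x86-64'
+ // and '--mcpu=pentium4' elsewhere); an explicit '-march=nocona' is
+ // forwarded as '-mcpu nocona'.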
+
+ // FIXME: Use iterator.
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(options::OPT_m_x86_Features_Group)) {
+ const char *Name = A->getOption().getName();
+
+ // Skip over "-m".
+ assert(Name[0] == '-' && Name[1] == 'm' && "Invalid feature name.");
+ Name += 2;
+
+ bool IsNegative = memcmp(Name, "no-", 3) == 0;
+ if (IsNegative)
+ Name += 3;
+
+ A->claim();
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("%c%s",
+ IsNegative ? '-' : '+',
+ Name)));
+ }
+ }
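+ // For example, '-msse3' is translated to '-target-feature +sse3' and
+ // '-mno-sse2' to '-target-feature -sse2'.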
+
+ if (Args.hasFlag(options::OPT_fmath_errno,
+ options::OPT_fno_math_errno,
+ getToolChain().IsMathErrnoDefault()))
+ CmdArgs.push_back("--fmath-errno=1");
+ else
+ CmdArgs.push_back("--fmath-errno=0");
+
+ if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
+ CmdArgs.push_back("--limit-float-precision");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // FIXME: Add --stack-protector-buffer-size=<xxx> on
+ // -fstack-protect.
+
+ Arg *Unsupported;
+ if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
+ (Unsupported = Args.getLastArg(options::OPT_MQ)) ||
+ (Unsupported = Args.getLastArg(options::OPT_iframework)))
+ D.Diag(clang::diag::err_drv_clang_unsupported)
+ << Unsupported->getOption().getName();
+
+ Args.AddAllArgs(CmdArgs, options::OPT_v);
+ Args.AddLastArg(CmdArgs, options::OPT_P);
+ Args.AddLastArg(CmdArgs, options::OPT_mmacosx_version_min_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_miphoneos_version_min_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
+
+ // Special case debug options to only pass -g to clang. This is
+ // wrong.
+ if (Args.hasArg(options::OPT_g_Group))
+ CmdArgs.push_back("-g");
+
+ Args.AddLastArg(CmdArgs, options::OPT_nostdinc);
+
+ Args.AddLastArg(CmdArgs, options::OPT_isysroot);
+
+ // Add preprocessing options like -I, -D, etc. if we are using the
+ // preprocessor.
+ //
+ // FIXME: Support -fpreprocessed
+ types::ID InputType = Inputs[0].getType();
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID)
+ AddPreprocessingOptions(D, Args, CmdArgs, Output, Inputs);
+
+ // Manually translate -O to -O1 and -O4 to -O3; let clang reject
+ // others.
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().getId() == options::OPT_O4)
+ CmdArgs.push_back("-O3");
+ else if (A->getValue(Args)[0] == '\0')
+ CmdArgs.push_back("-O1");
+ else
+ A->render(Args, CmdArgs);
+ }
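+ // For example, '-O' becomes '-O1' and '-O4' becomes '-O3', while
+ // '-O2' is forwarded unchanged.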
+
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group, options::OPT_pedantic_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
+ // (-ansi is equivalent to -std=c89).
+ //
+ // If a std is supplied, only add -trigraphs if it follows the
+ // option.
+ if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ if (Std->getOption().matches(options::OPT_ansi))
+ CmdArgs.push_back("-std=c89");
+ else
+ Std->render(Args, CmdArgs);
+
+ if (Arg *A = Args.getLastArg(options::OPT_trigraphs))
+ if (A->getIndex() > Std->getIndex())
+ A->render(Args, CmdArgs);
+ } else {
+ // Honor -std-default.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
+ }
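+ // For example, '-ansi' is rewritten to '-std=c89', and given
+ // '-trigraphs -std=gnu99' the -trigraphs is dropped because it
+ // precedes the -std option.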
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_)) {
+ CmdArgs.push_back("-ftemplate-depth");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // Forward -f options which we can pass directly.
+ Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
+ Args.AddLastArg(CmdArgs, options::OPT_fexceptions);
+ Args.AddLastArg(CmdArgs, options::OPT_ffreestanding);
+ Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fgnu_runtime);
+ Args.AddLastArg(CmdArgs, options::OPT_flax_vector_conversions);
+ Args.AddLastArg(CmdArgs, options::OPT_fmessage_length_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fms_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fnext_runtime);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_caret_diagnostics);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_show_column);
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_gc_only);
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_gc);
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
+ // FIXME: Should we remove this?
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_nonfragile_abi);
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_tight_layout);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
+ Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
+
+ // Forward -f options with positive and negative forms; we translate
+ // these by hand.
+
+ // -fbuiltin is default, only pass non-default.
+ if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ CmdArgs.push_back("-fbuiltin=0");
+
+ // -fblocks default varies depending on platform and language; only
+ // pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fblocks, options::OPT_fno_blocks)) {
+ if (A->getOption().matches(options::OPT_fblocks))
+ CmdArgs.push_back("-fblocks");
+ else
+ CmdArgs.push_back("-fblocks=0");
+ }
+
+ // -fno-pascal-strings is default, only pass non-default. If the
+ // tool chain happened to translate to -mpascal-strings, we want to
+ // back translate here.
+ //
+ // FIXME: This is gross; that translation should be pulled from the
+ // tool chain.
+ if (Args.hasFlag(options::OPT_fpascal_strings,
+ options::OPT_fno_pascal_strings,
+ false) ||
+ Args.hasFlag(options::OPT_mpascal_strings,
+ options::OPT_mno_pascal_strings,
+ false))
+ CmdArgs.push_back("-fpascal-strings");
+
+ // -fcommon is default, only pass non-default.
+ if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common))
+ CmdArgs.push_back("-fno-common");
+
+ // -fsigned-bitfields is default, and clang doesn't yet support
+ // --funsigned-bitfields.
+ if (!Args.hasFlag(options::OPT_fsigned_bitfields,
+ options::OPT_funsigned_bitfields))
+ D.Diag(clang::diag::warn_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
+
+ // -fdiagnostics-fixit-info is default, only pass non-default.
+ if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
+ options::OPT_fno_diagnostics_fixit_info))
+ CmdArgs.push_back("-fno-diagnostics-fixit-info");
+
+ // Enable -fdiagnostics-show-option by default.
+ if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option))
+ CmdArgs.push_back("-fdiagnostics-show-option");
+
+ // -fdollars-in-identifiers default varies depending on platform and
+ // language; only pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers,
+ options::OPT_fno_dollars_in_identifiers)) {
+ if (A->getOption().matches(options::OPT_fdollars_in_identifiers))
+ CmdArgs.push_back("-fdollars-in-identifiers=1");
+ else
+ CmdArgs.push_back("-fdollars-in-identifiers=0");
+ }
+
+ // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for
+ // practical purposes.
+ if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time,
+ options::OPT_fno_unit_at_a_time)) {
+ if (A->getOption().matches(options::OPT_fno_unit_at_a_time))
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_dM);
+ Args.AddLastArg(CmdArgs, options::OPT_dD);
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
+
+ if (Output.getType() == types::TY_Dependencies) {
+ // Handled with other dependency code.
+ } else if (Output.isPipe()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back("-");
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(types::getTypeName(II.getType()));
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "clang-cc").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+
+ // Explicitly warn that these options are unsupported, even though
+ // we are allowing compilation to continue.
+ // FIXME: Use iterator.
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(options::OPT_pg)) {
+ A->claim();
+ D.Diag(clang::diag::warn_drv_clang_unsupported)
+ << A->getAsString(Args);
+ }
+ }
+
+ // Claim some arguments which clang supports automatically.
+
+ // -fpch-preprocess is used with gcc to add a special marker in the
+ // output to include the PCH file. Clang's PTH solution is
+ // completely transparent, so we do not need to deal with it at
+ // all.
+ Args.ClaimAllArgs(options::OPT_fpch_preprocess);
+
+ // Claim some arguments which clang doesn't support, but we don't
+ // care to warn the user about.
+
+ // FIXME: Use iterator.
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(options::OPT_clang_ignored_f_Group) ||
+ A->getOption().matches(options::OPT_clang_ignored_m_Group))
+ A->claim();
+ }
+}
+
+void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+ ArgStringList CmdArgs;
+
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().hasForwardToGCC()) {
+ // It is unfortunate that we have to claim here, as this means
+ // we will basically never report anything interesting for
+ // platforms using a generic gcc, even if we are just using gcc
+ // to get to the assembler.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ RenderExtraToolArgs(CmdArgs);
+
+ // If using a driver driver, force the arch.
+ const std::string &Arch = getToolChain().getArchName();
+ if (getToolChain().getHost().useDriverDriver()) {
+ CmdArgs.push_back("-arch");
+
+ // FIXME: Remove these special cases.
+ if (Arch == "powerpc")
+ CmdArgs.push_back("ppc");
+ else if (Arch == "powerpc64")
+ CmdArgs.push_back("ppc64");
+ else
+ CmdArgs.push_back(Args.MakeArgString(Arch.c_str()));
+ }
+
+ // Try to force gcc to match the tool chain we want, if we recognize
+ // the arch.
+ //
+ // FIXME: The triple class should directly provide the information we want
+ // here.
+ if (Arch == "i386" || Arch == "powerpc")
+ CmdArgs.push_back("-m32");
+ else if (Arch == "x86_64" || Arch == "powerpc64")
+ CmdArgs.push_back("-m64");
+
+ if (Output.isPipe()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back("-");
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+
+ // Only pass -x if gcc will understand it; otherwise hope gcc
+ // understands the suffix correctly. The main case where this would go
+ // wrong is for linker inputs that happen to have an odd suffix;
+ // really the only way to get that is a command like '-x foobar a.c',
+ // which will treat a.c like a linker input.
+ //
+ // FIXME: For the linker case specifically, can we safely convert
+ // inputs into '-Wl,' options?
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVMBC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString().c_str();
+
+ if (types::canTypeBeUserSpecified(II.getType())) {
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(types::getTypeName(II.getType()));
+ }
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ // Don't render as input, we need gcc to do the translations.
+ II.getInputArg().render(Args, CmdArgs);
+ }
+
+ const char *GCCName =
+ getToolChain().getHost().getDriver().CCCGenericGCCName.c_str();
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, GCCName).c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+void gcc::Preprocess::RenderExtraToolArgs(ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-E");
+}
+
+void gcc::Precompile::RenderExtraToolArgs(ArgStringList &CmdArgs) const {
+ // The type is good enough.
+}
+
+void gcc::Compile::RenderExtraToolArgs(ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-S");
+}
+
+void gcc::Assemble::RenderExtraToolArgs(ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-c");
+}
+
+void gcc::Link::RenderExtraToolArgs(ArgStringList &CmdArgs) const {
+ // The types are (hopefully) good enough.
+}
+
+const char *darwin::CC1::getCC1Name(types::ID Type) const {
+ switch (Type) {
+ default:
+ assert(0 && "Unexpected type for Darwin CC1 tool.");
+ case types::TY_Asm:
+ case types::TY_C: case types::TY_CHeader:
+ case types::TY_PP_C: case types::TY_PP_CHeader:
+ return "cc1";
+ case types::TY_ObjC: case types::TY_ObjCHeader:
+ case types::TY_PP_ObjC: case types::TY_PP_ObjCHeader:
+ return "cc1obj";
+ case types::TY_CXX: case types::TY_CXXHeader:
+ case types::TY_PP_CXX: case types::TY_PP_CXXHeader:
+ return "cc1plus";
+ case types::TY_ObjCXX: case types::TY_ObjCXXHeader:
+ case types::TY_PP_ObjCXX: case types::TY_PP_ObjCXXHeader:
+ return "cc1objplus";
+ }
+}
+
+const char *darwin::CC1::getBaseInputName(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ llvm::sys::Path P(Inputs[0].getBaseInput());
+ return Args.MakeArgString(P.getLast().c_str());
+}
+
+const char *darwin::CC1::getBaseInputStem(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ const char *Str = getBaseInputName(Args, Inputs);
+
+ if (const char *End = strchr(Str, '.'))
+ return Args.MakeArgString(std::string(Str, End).c_str());
+
+ return Str;
+}
+
+const char *
+darwin::CC1::getDependencyFileName(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ // FIXME: Think about this more.
+ std::string Res;
+
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ std::string Str(OutputOpt->getValue(Args));
+
+ Res = Str.substr(0, Str.rfind('.'));
+ } else
+ Res = darwin::CC1::getBaseInputStem(Args, Inputs);
+
+ return Args.MakeArgString((Res + ".d").c_str());
+}
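+// Illustrative behavior of the helpers above: for an input of
+// 'src/foo.bar.c', getBaseInputName returns 'foo.bar.c',
+// getBaseInputStem returns 'foo' (everything before the first '.'),
+// and getDependencyFileName returns 'out.d' given '-o out.o'
+// (otherwise 'foo.d').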
+
+void darwin::CC1::AddCC1Args(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from cc1 spec.
+
+ // FIXME: -fapple-kext seems to disable this too. Investigate.
+ if (!Args.hasArg(options::OPT_mkernel) && !Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_mdynamic_no_pic))
+ CmdArgs.push_back("-fPIC");
+
+ // gcc has some code here to deal with the case where neither
+ // -mmacosx-version-min nor -miphoneos-version-min is present, but
+ // this never happens due to tool chain specific argument translation.
+
+ // FIXME: Remove mthumb
+ // FIXME: Remove mno-thumb
+ // FIXME: Remove faltivec
+ // FIXME: Remove mno-fused-madd
+ // FIXME: Remove mlong-branch
+ // FIXME: Remove mlongcall
+ // FIXME: Remove mcpu=G4
+ // FIXME: Remove mcpu=G5
+
+ if (Args.hasArg(options::OPT_g_Flag) &&
+ !Args.hasArg(options::OPT_fno_eliminate_unused_debug_symbols))
+ CmdArgs.push_back("-feliminate-unused-debug-symbols");
+}
+
+void darwin::CC1::AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+
+ // Derived from cc1_options spec.
+ if (Args.hasArg(options::OPT_fast) ||
+ Args.hasArg(options::OPT_fastf) ||
+ Args.hasArg(options::OPT_fastcp))
+ CmdArgs.push_back("-O3");
+
+ if (Arg *A = Args.getLastArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fomit-frame-pointer";
+
+ AddCC1Args(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_Q))
+ CmdArgs.push_back("-quiet");
+
+ CmdArgs.push_back("-dumpbase");
+ CmdArgs.push_back(darwin::CC1::getBaseInputName(Args, Inputs));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Group);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_a_Group);
+
+ // FIXME: The goal is to use the user provided -o if that is our
+ // final output, otherwise to drive from the original input
+ // name. Find a clean way to go about this.
+ if ((Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S)) &&
+ Args.hasArg(options::OPT_o)) {
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ CmdArgs.push_back("-auxbase-strip");
+ CmdArgs.push_back(OutputOpt->getValue(Args));
+ } else {
+ CmdArgs.push_back("-auxbase");
+ CmdArgs.push_back(darwin::CC1::getBaseInputStem(Args, Inputs));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_g_Group);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_O);
+ // FIXME: -Wall is getting some special treatment. Investigate.
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group, options::OPT_pedantic_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+ Args.AddAllArgs(CmdArgs, options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs);
+ if (!Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ // Honor -std-default.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ }
+
+ if (Args.hasArg(options::OPT_v))
+ CmdArgs.push_back("-version");
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-p");
+ Args.AddLastArg(CmdArgs, options::OPT_p);
+
+ // The driver treats -fsyntax-only specially.
+ Args.AddAllArgs(CmdArgs, options::OPT_f_Group, options::OPT_fsyntax_only);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+ if (Args.hasArg(options::OPT_Qn))
+ CmdArgs.push_back("-fno-ident");
+
+ // FIXME: This isn't correct.
+ //Args.AddLastArg(CmdArgs, options::OPT__help)
+ //Args.AddLastArg(CmdArgs, options::OPT__targetHelp)
+
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+
+ // FIXME: Still don't get what is happening here. Investigate.
+ Args.AddAllArgs(CmdArgs, options::OPT__param);
+
+ if (Args.hasArg(options::OPT_fmudflap) ||
+ Args.hasArg(options::OPT_fmudflapth)) {
+ CmdArgs.push_back("-fno-builtin");
+ CmdArgs.push_back("-fno-merge-constants");
+ }
+
+ if (Args.hasArg(options::OPT_coverage)) {
+ CmdArgs.push_back("-fprofile-arcs");
+ CmdArgs.push_back("-ftest-coverage");
+ }
+
+ if (types::isCXX(Inputs[0].getType()))
+ CmdArgs.push_back("-D__private_extern__=extern");
+}
+
+void darwin::CC1::AddCPPOptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const {
+ // Derived from cpp_options
+ AddCPPUniqueOptionsArgs(Args, CmdArgs, Inputs);
+
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+
+ AddCC1Args(Args, CmdArgs);
+
+ // NOTE: The code below has some commonality with cpp_options, but
+ // in classic gcc style ends up sending things in different
+ // orders. This may be a good merge candidate once we drop pedantic
+ // compatibility.
+
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs);
+ if (!Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ // Honor -std-default.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ }
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group, options::OPT_pedantic_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // The driver treats -fsyntax-only specially.
+ Args.AddAllArgs(CmdArgs, options::OPT_f_Group, options::OPT_fsyntax_only);
+
+ if (Args.hasArg(options::OPT_g_Group) && !Args.hasArg(options::OPT_g0) &&
+ !Args.hasArg(options::OPT_fno_working_directory))
+ CmdArgs.push_back("-fworking-directory");
+
+ Args.AddAllArgs(CmdArgs, options::OPT_O);
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+ if (Args.hasArg(options::OPT_save_temps))
+ CmdArgs.push_back("-fpch-preprocess");
+}
+
+void darwin::CC1::AddCPPUniqueOptionsArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const
+{
+ const Driver &D = getToolChain().getHost().getDriver();
+
+ // Derived from cpp_unique_options.
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_C)) ||
+ (A = Args.getLastArg(options::OPT_CC))) {
+ if (!Args.hasArg(options::OPT_E))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-E";
+ }
+ if (!Args.hasArg(options::OPT_Q))
+ CmdArgs.push_back("-quiet");
+ Args.AddAllArgs(CmdArgs, options::OPT_nostdinc);
+ Args.AddLastArg(CmdArgs, options::OPT_v);
+ Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F);
+ Args.AddLastArg(CmdArgs, options::OPT_P);
+
+ // FIXME: Handle %I properly.
+ if (getToolChain().getArchName() == "x86_64") {
+ CmdArgs.push_back("-imultilib");
+ CmdArgs.push_back("x86_64");
+ }
+
+ if (Args.hasArg(options::OPT_MD)) {
+ CmdArgs.push_back("-MD");
+ CmdArgs.push_back(darwin::CC1::getDependencyFileName(Args, Inputs));
+ }
+
+ if (Args.hasArg(options::OPT_MMD)) {
+ CmdArgs.push_back("-MMD");
+ CmdArgs.push_back(darwin::CC1::getDependencyFileName(Args, Inputs));
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_M);
+ Args.AddLastArg(CmdArgs, options::OPT_MM);
+ Args.AddAllArgs(CmdArgs, options::OPT_MF);
+ Args.AddLastArg(CmdArgs, options::OPT_MG);
+ Args.AddLastArg(CmdArgs, options::OPT_MP);
+ Args.AddAllArgs(CmdArgs, options::OPT_MQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_MT);
+ if (!Args.hasArg(options::OPT_M) && !Args.hasArg(options::OPT_MM) &&
+ (Args.hasArg(options::OPT_MD) || Args.hasArg(options::OPT_MMD))) {
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ CmdArgs.push_back("-MQ");
+ CmdArgs.push_back(OutputOpt->getValue(Args));
+ }
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_remap);
+ if (Args.hasArg(options::OPT_g3))
+ CmdArgs.push_back("-dD");
+ Args.AddLastArg(CmdArgs, options::OPT_H);
+
+ AddCPPArgs(Args, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U, options::OPT_A);
+ Args.AddAllArgs(CmdArgs, options::OPT_i_Group);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA,
+ options::OPT_Xpreprocessor);
+
+ if (Args.hasArg(options::OPT_fmudflap)) {
+ CmdArgs.push_back("-D_MUDFLAP");
+ CmdArgs.push_back("-include");
+ CmdArgs.push_back("mf-runtime.h");
+ }
+
+ if (Args.hasArg(options::OPT_fmudflapth)) {
+ CmdArgs.push_back("-D_MUDFLAP");
+ CmdArgs.push_back("-D_MUDFLAPTH");
+ CmdArgs.push_back("-include");
+ CmdArgs.push_back("mf-runtime.h");
+ }
+}
+
+void darwin::CC1::AddCPPArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from cpp spec.
+
+ if (Args.hasArg(options::OPT_static)) {
+ // The gcc spec is broken here, it refers to dynamic but
+ // that has been translated. Start by being bug compatible.
+
+ // if (!Args.hasArg(arglist.parser.dynamicOption))
+ CmdArgs.push_back("-D__STATIC__");
+ } else
+ CmdArgs.push_back("-D__DYNAMIC__");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-D_REENTRANT");
+}
+
+void darwin::Preprocess::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs!");
+
+ CmdArgs.push_back("-E");
+
+ if (Args.hasArg(options::OPT_traditional) ||
+ Args.hasArg(options::OPT_ftraditional) ||
+ Args.hasArg(options::OPT_traditional_cpp))
+ CmdArgs.push_back("-traditional-cpp");
+
+ ArgStringList OutputArgs;
+ if (Output.isFilename()) {
+ OutputArgs.push_back("-o");
+ OutputArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isPipe() && "Unexpected CC1 output.");
+ }
+
+ if (Args.hasArg(options::OPT_E)) {
+ AddCPPOptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCPPOptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Group);
+
+ const char *CC1Name = getCC1Name(Inputs[0].getType());
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name).c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs!");
+
+ types::ID InputType = Inputs[0].getType();
+ const Arg *A;
+ if ((A = Args.getLastArg(options::OPT_traditional)) ||
+ (A = Args.getLastArg(options::OPT_ftraditional)))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-E";
+
+ if (Output.getType() == types::TY_LLVMAsm)
+ CmdArgs.push_back("-emit-llvm");
+ else if (Output.getType() == types::TY_LLVMBC)
+ CmdArgs.push_back("-emit-llvm-bc");
+
+ ArgStringList OutputArgs;
+ if (Output.getType() != types::TY_PCH) {
+ OutputArgs.push_back("-o");
+ if (Output.isPipe())
+ OutputArgs.push_back("-");
+ else if (Output.isNothing())
+ OutputArgs.push_back("/dev/null");
+ else
+ OutputArgs.push_back(Output.getFilename());
+ }
+
+ // There is no need for this level of compatibility, but it makes
+ // diffing easier.
+ bool OutputArgsEarly = (Args.hasArg(options::OPT_fsyntax_only) ||
+ Args.hasArg(options::OPT_S));
+
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID) {
+ AddCPPUniqueOptionsArgs(Args, CmdArgs, Inputs);
+ if (OutputArgsEarly) {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+ } else {
+ CmdArgs.push_back("-fpreprocessed");
+
+ // FIXME: There is a spec command to remove
+ // -fpredictive-compilation args here. Investigate.
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ if (OutputArgsEarly) {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+ }
+
+ if (Output.getType() == types::TY_PCH) {
+ assert(Output.isFilename() && "Invalid PCH output.");
+
+ CmdArgs.push_back("-o");
+ // NOTE: gcc uses a temp .s file for this, but there doesn't seem
+ // to be a good reason.
+ CmdArgs.push_back("/dev/null");
+
+ CmdArgs.push_back("--output-pch=");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ const char *CC1Name = getCC1Name(Inputs[0].getType());
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name).c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ // Bit of a hack, this is only used for original inputs.
+ //
+ // FIXME: This is broken for preprocessed .s inputs.
+ if (Input.isFilename() &&
+ strcmp(Input.getFilename(), Input.getBaseInput()) == 0) {
+ if (Args.hasArg(options::OPT_gstabs))
+ CmdArgs.push_back("--gstabs");
+ else if (Args.hasArg(options::OPT_g_Group))
+ CmdArgs.push_back("--gdwarf2");
+ }
+
+ // Derived from asm spec.
+ CmdArgs.push_back("-arch");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getArchName().c_str()));
+
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+ if ((Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_fapple_kext)) &&
+ !Args.hasArg(options::OPT_dynamic))
+ CmdArgs.push_back("-static");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (Input.isPipe()) {
+ CmdArgs.push_back("-");
+ } else {
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+ }
+
+ // asm_final spec is empty.
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "as").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+/// Helper routine for seeing if we should use dsymutil. This is a
+/// gcc compatible hack; we should remove it and use the input
+/// type information.
+static bool isSourceSuffix(const char *Str) {
+ // match: 'C', 'CPP', 'c', 'cc', 'cp', 'c++', 'cpp', 'cxx', 'm',
+ // 'mm'.
+ switch (strlen(Str)) {
+ default:
+ return false;
+ case 1:
+ return (memcmp(Str, "C", 1) == 0 ||
+ memcmp(Str, "c", 1) == 0 ||
+ memcmp(Str, "m", 1) == 0);
+ case 2:
+ return (memcmp(Str, "cc", 2) == 0 ||
+ memcmp(Str, "cp", 2) == 0 ||
+ memcmp(Str, "mm", 2) == 0);
+ case 3:
+ return (memcmp(Str, "CPP", 3) == 0 ||
+ memcmp(Str, "c++", 3) == 0 ||
+ memcmp(Str, "cpp", 3) == 0 ||
+ memcmp(Str, "cxx", 3) == 0);
+ }
+}
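+// For example, the above returns true for "cpp", "cxx", and "m", but
+// false for suffixes like "s", "i", or "o".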
+
+static bool isMacosxVersionLT(unsigned (&A)[3], unsigned (&B)[3]) {
+ for (unsigned i=0; i < 3; ++i) {
+ if (A[i] > B[i]) return false;
+ if (A[i] < B[i]) return true;
+ }
+ return false;
+}
+
+static bool isMacosxVersionLT(unsigned (&A)[3],
+ unsigned V0, unsigned V1=0, unsigned V2=0) {
+ unsigned B[3] = { V0, V1, V2 };
+ return isMacosxVersionLT(A, B);
+}
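+// For example, a version of {10, 4, 11} compares less than (10, 5),
+// while {10, 5, 0} does not.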
+
+const toolchains::Darwin_X86 &darwin::Link::getDarwinToolChain() const {
+ return reinterpret_cast<const toolchains::Darwin_X86&>(getToolChain());
+}
+
+void darwin::Link::AddDarwinArch(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from darwin_arch spec.
+ CmdArgs.push_back("-arch");
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().getArchName().c_str()));
+}
+
+void darwin::Link::AddDarwinSubArch(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from darwin_subarch spec; not sure why the distinction
+ // exists, but at least for this chain it is the same.
+ AddDarwinArch(Args, CmdArgs);
+}
+
+void darwin::Link::AddLinkArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+
+ // Derived from the "link" spec.
+ Args.AddAllArgs(CmdArgs, options::OPT_static);
+ if (!Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-dynamic");
+ if (Args.hasArg(options::OPT_fgnu_runtime)) {
+ // FIXME: gcc replaces -lobjc in forward args with -lobjc-gnu
+ // here. How do we wish to handle such things?
+ }
+
+ if (!Args.hasArg(options::OPT_dynamiclib)) {
+ if (Args.hasArg(options::OPT_force__cpusubtype__ALL)) {
+ AddDarwinArch(Args, CmdArgs);
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+ } else
+ AddDarwinSubArch(Args, CmdArgs);
+
+ Args.AddLastArg(CmdArgs, options::OPT_bundle);
+ Args.AddAllArgs(CmdArgs, options::OPT_bundle__loader);
+ Args.AddAllArgs(CmdArgs, options::OPT_client__name);
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_compatibility__version)) ||
+ (A = Args.getLastArg(options::OPT_current__version)) ||
+ (A = Args.getLastArg(options::OPT_install__name)))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-dynamiclib";
+
+ Args.AddLastArg(CmdArgs, options::OPT_force__flat__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_keep__private__externs);
+ Args.AddLastArg(CmdArgs, options::OPT_private__bundle);
+ } else {
+ CmdArgs.push_back("-dylib");
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_bundle)) ||
+ (A = Args.getLastArg(options::OPT_bundle__loader)) ||
+ (A = Args.getLastArg(options::OPT_client__name)) ||
+ (A = Args.getLastArg(options::OPT_force__flat__namespace)) ||
+ (A = Args.getLastArg(options::OPT_keep__private__externs)) ||
+ (A = Args.getLastArg(options::OPT_private__bundle)))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-dynamiclib";
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_compatibility__version,
+ "-dylib_compatibility_version");
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_current__version,
+ "-dylib_current_version");
+
+ if (Args.hasArg(options::OPT_force__cpusubtype__ALL)) {
+ AddDarwinArch(Args, CmdArgs);
+ // NOTE: We don't add -force_cpusubtype_ALL on this path. Ok.
+ } else
+ AddDarwinSubArch(Args, CmdArgs);
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_install__name,
+ "-dylib_install_name");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_all__load);
+ Args.AddAllArgs(CmdArgs, options::OPT_allowable__client);
+ Args.AddLastArg(CmdArgs, options::OPT_bind__at__load);
+ Args.AddLastArg(CmdArgs, options::OPT_dead__strip);
+ Args.AddLastArg(CmdArgs, options::OPT_no__dead__strip__inits__and__terms);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylib__file);
+ Args.AddLastArg(CmdArgs, options::OPT_dynamic);
+ Args.AddAllArgs(CmdArgs, options::OPT_exported__symbols__list);
+ Args.AddLastArg(CmdArgs, options::OPT_flat__namespace);
+ Args.AddAllArgs(CmdArgs, options::OPT_headerpad__max__install__names);
+ Args.AddAllArgs(CmdArgs, options::OPT_image__base);
+ Args.AddAllArgs(CmdArgs, options::OPT_init);
+
+ if (!Args.hasArg(options::OPT_mmacosx_version_min_EQ)) {
+ if (!Args.hasArg(options::OPT_miphoneos_version_min_EQ)) {
+ // FIXME: I don't understand what is going on here. This is
+ // supposed to come from darwin_ld_minversion, but gcc doesn't
+ // seem to be following that; it must be getting overridden
+ // somewhere.
+ CmdArgs.push_back("-macosx_version_min");
+ CmdArgs.push_back(getDarwinToolChain().getMacosxVersionStr());
+ }
+ } else {
+ // Adding all arguments doesn't make sense here but this is what
+ // gcc does.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_mmacosx_version_min_EQ,
+ "-macosx_version_min");
+ }
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_miphoneos_version_min_EQ,
+ "-iphoneos_version_min");
+ Args.AddLastArg(CmdArgs, options::OPT_nomultidefs);
+ Args.AddLastArg(CmdArgs, options::OPT_multi__module);
+ Args.AddLastArg(CmdArgs, options::OPT_single__module);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused);
+
+ if (Args.hasArg(options::OPT_fpie))
+ CmdArgs.push_back("-pie");
+
+ Args.AddLastArg(CmdArgs, options::OPT_prebind);
+ Args.AddLastArg(CmdArgs, options::OPT_noprebind);
+ Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
+ Args.AddLastArg(CmdArgs, options::OPT_prebind__all__twolevel__modules);
+ Args.AddLastArg(CmdArgs, options::OPT_read__only__relocs);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectcreate);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectorder);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg1addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segprot);
+ Args.AddAllArgs(CmdArgs, options::OPT_segaddr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__only__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__write__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table__filename);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__library);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__umbrella);
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_isysroot, "-syslibroot");
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace__hints);
+ Args.AddAllArgs(CmdArgs, options::OPT_umbrella);
+ Args.AddAllArgs(CmdArgs, options::OPT_undefined);
+ Args.AddAllArgs(CmdArgs, options::OPT_unexported__symbols__list);
+ Args.AddAllArgs(CmdArgs, options::OPT_weak__reference__mismatches);
+
+ if (!Args.hasArg(options::OPT_weak__reference__mismatches)) {
+ CmdArgs.push_back("-weak_reference_mismatches");
+ CmdArgs.push_back("non-weak");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_X_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_y);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+ Args.AddAllArgs(CmdArgs, options::OPT_pagezero__size);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__);
+ Args.AddLastArg(CmdArgs, options::OPT_seglinkedit);
+ Args.AddLastArg(CmdArgs, options::OPT_noseglinkedit);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectalign);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectobjectsymbols);
+ Args.AddAllArgs(CmdArgs, options::OPT_segcreate);
+ Args.AddLastArg(CmdArgs, options::OPT_whyload);
+ Args.AddLastArg(CmdArgs, options::OPT_whatsloaded);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
+ Args.AddLastArg(CmdArgs, options::OPT_dylinker);
+ Args.AddLastArg(CmdArgs, options::OPT_Mach);
+}
+
+void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ assert(Output.getType() == types::TY_Image && "Invalid linker output type.");
+ // The logic here is derived from gcc's behavior; most of which
+ // comes from specs (starting with link_command). Consult gcc for
+ // more information.
+
+ // FIXME: The spec references -fdump= which seems to have
+ // disappeared?
+
+ ArgStringList CmdArgs;
+
+ // I'm not sure why this particular decomposition exists in gcc, but
+ // we follow suit for ease of comparison.
+ AddLinkArgs(Args, CmdArgs);
+
+ // FIXME: gcc has %{x} in here. How could this ever happen? Cruft?
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_A);
+ Args.AddLastArg(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Separate);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ // FIXME: This is just being pedantically bug compatible; gcc
+ // doesn't *mean* to forward this, it just does (yay for pattern
+ // matching). It doesn't work, of course.
+ Args.AddAllArgs(CmdArgs, options::OPT_object);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ unsigned MacosxVersion[3];
+ if (Arg *A = Args.getLastArg(options::OPT_mmacosx_version_min_EQ)) {
+ bool HadExtra;
+ if (!Driver::GetReleaseVersion(A->getValue(Args), MacosxVersion[0],
+ MacosxVersion[1], MacosxVersion[2],
+ HadExtra) ||
+ HadExtra) {
+ const Driver &D = getToolChain().getHost().getDriver();
+ D.Diag(clang::diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+ }
+ } else {
+ getDarwinToolChain().getMacosxVersion(MacosxVersion);
+ }
+
+ if (!Args.hasArg(options::OPT_A) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ // Derived from startfile spec.
+ if (Args.hasArg(options::OPT_dynamiclib)) {
+ // Derived from darwin_dylib1 spec.
+ if (isMacosxVersionLT(MacosxVersion, 10, 5))
+ CmdArgs.push_back("-ldylib1.o");
+ else if (isMacosxVersionLT(MacosxVersion, 10, 6))
+ CmdArgs.push_back("-ldylib1.10.5.o");
+ } else {
+ if (Args.hasArg(options::OPT_bundle)) {
+ if (!Args.hasArg(options::OPT_static)) {
+ // Derived from darwin_bundle1 spec.
+ if (isMacosxVersionLT(MacosxVersion, 10, 6))
+ CmdArgs.push_back("-lbundle1.o");
+ }
+ } else {
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
+ } else {
+ CmdArgs.push_back("-lgcrt1.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ } else {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lcrt0.o");
+ } else {
+ // Derived from darwin_crt1 spec.
+ if (isMacosxVersionLT(MacosxVersion, 10, 5))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (isMacosxVersionLT(MacosxVersion, 10, 6))
+ CmdArgs.push_back("-lcrt1.10.5.o");
+ else
+ CmdArgs.push_back("-lcrt1.10.6.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ }
+ }
+ }
+
+ if (Args.hasArg(options::OPT_shared_libgcc) &&
+ !Args.hasArg(options::OPT_miphoneos_version_min_EQ) &&
+ isMacosxVersionLT(MacosxVersion, 10, 5)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt3.o").c_str()));
+ }
+ }
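+ // For example, a plain executable link (no -static, -pg, -bundle, or
+ // -dynamiclib) gets '-lcrt1.10.5.o' when targeting 10.5 and
+ // '-lcrt1.10.6.o' when targeting 10.6 or later, while a -dynamiclib
+ // link targeting 10.4 gets '-ldylib1.o'.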
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ if (Args.hasArg(options::OPT_fopenmp))
+ // This is more complicated in gcc...
+ CmdArgs.push_back("-lgomp");
+
+ // FIXME: Derive these correctly.
+ const char *TCDir = getDarwinToolChain().getToolChainDir().c_str();
+ if (getToolChain().getArchName() == "x86_64") {
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s/x86_64", TCDir)));
+ // Intentionally duplicated for (temporary) gcc bug compatibility.
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s/x86_64", TCDir)));
+ }
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/%s", TCDir)));
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s", TCDir)));
+ // Intentionally duplicated for (temporary) gcc bug compatibility.
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s", TCDir)));
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s/../../../%s", TCDir, TCDir)));
+ CmdArgs.push_back(MakeFormattedString(Args,
+ llvm::format("-L/usr/lib/gcc/%s/../../..", TCDir)));
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ if (LinkingOutput) {
+ CmdArgs.push_back("-arch_multiple");
+ CmdArgs.push_back("-final_output");
+ CmdArgs.push_back(LinkingOutput);
+ }
+
+ if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-lgcov");
+
+ if (Args.hasArg(options::OPT_fnested_functions))
+ CmdArgs.push_back("-allow_stack_execute");
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // FIXME: g++ is more complicated here, it tries to put -lstdc++
+ // before -lm, for example.
+ if (getToolChain().getHost().getDriver().CCCIsCXX)
+ CmdArgs.push_back("-lstdc++");
+
+ // link_ssp spec is empty.
+
+ // Derived from libgcc and lib specs but refactored.
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_static");
+ } else {
+ if (Args.hasArg(options::OPT_static_libgcc)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_miphoneos_version_min_EQ)) {
+ // Derived from darwin_iphoneos_libgcc spec.
+ CmdArgs.push_back("-lgcc_s.10.5");
+ } else if (Args.hasArg(options::OPT_shared_libgcc) ||
+ Args.hasArg(options::OPT_fexceptions) ||
+ Args.hasArg(options::OPT_fgnu_runtime)) {
+ // FIXME: This is probably broken on 10.3?
+ if (isMacosxVersionLT(MacosxVersion, 10, 5))
+ CmdArgs.push_back("-lgcc_s.10.4");
+ else if (isMacosxVersionLT(MacosxVersion, 10, 6))
+ CmdArgs.push_back("-lgcc_s.10.5");
+ } else {
+ if (isMacosxVersionLT(MacosxVersion, 10, 3, 9))
+ ; // Do nothing.
+ else if (isMacosxVersionLT(MacosxVersion, 10, 5))
+ CmdArgs.push_back("-lgcc_s.10.4");
+ else if (isMacosxVersionLT(MacosxVersion, 10, 6))
+ CmdArgs.push_back("-lgcc_s.10.5");
+ }
+
+ if (isMacosxVersionLT(MacosxVersion, 10, 6)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lSystem");
+ } else {
+ CmdArgs.push_back("-lSystem");
+ CmdArgs.push_back("-lgcc");
+ }
+ }
+ }
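+ // For example, a default link targeting 10.5 adds '-lgcc_s.10.5',
+ // '-lgcc', '-lSystem' (in that order), one targeting 10.6 adds just
+ // '-lSystem', '-lgcc', and a -static link uses '-lgcc_static' instead.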
+
+ if (!Args.hasArg(options::OPT_A) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ // endfile_spec is empty.
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_F);
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "ld").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+
+ // Find the first non-empty base input (we want to ignore linker
+ // inputs).
+ const char *BaseInput = "";
+ for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
+ if (Inputs[i].getBaseInput()[0] != '\0') {
+ BaseInput = Inputs[i].getBaseInput();
+ break;
+ }
+ }
+
+ // Run dsymutil if we are making an executable in a single step.
+ //
+ // FIXME: Currently we don't want to do this when we are part of a
+ // universal build step, as this would end up creating stray temp
+ // files.
+ if (!LinkingOutput &&
+ Args.getLastArg(options::OPT_g_Group) &&
+ !Args.getLastArg(options::OPT_gstabs) &&
+ !Args.getLastArg(options::OPT_g0)) {
+ // FIXME: This is gross, but matches gcc. The test only considers
+ // the suffix (not the -x type), and then only of the first
+ // source input. Awesome.
+ const char *Suffix = strrchr(BaseInput, '.');
+ if (Suffix && isSourceSuffix(Suffix + 1)) {
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "dsymutil").c_str());
+ ArgStringList CmdArgs;
+ CmdArgs.push_back(Output.getFilename());
+ C.getJobs().addCommand(new Command(Exec, CmdArgs));
+ }
+ }
+}
+
+void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-create");
+ assert(Output.isFilename() && "Unexpected lipo output.");
+
+ CmdArgs.push_back("-output");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ assert(II.isFilename() && "Unexpected lipo input.");
+ CmdArgs.push_back(II.getFilename());
+ }
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "lipo").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+
+void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const
+{
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct 'as' in the base system to assemble 32-bit code.
+ if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--32");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ if (Output.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "as").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld-elf.so.1");
+ }
+ }
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArchName() == "i386") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386_fbsd");
+ }
+
+ if (Output.isPipe()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back("-");
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o").c_str()));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o").c_str()));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVMBC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString().c_str();
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o").c_str()));
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o").c_str()));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "ld").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+/// DragonFly Tools
+
+// For now, DragonFly Assemble does just about the same as for
+// FreeBSD, but this may change soon.
+void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct as in the base system to assemble 32-bit code.
+ if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--32");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ if (Output.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "as").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
+
+void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest, const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getHost().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-Bshareable");
+ else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld-elf.so.2");
+ }
+ }
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArchName() == "i386") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ }
+
+ if (Output.isPipe()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back("-");
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o").c_str()));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o").c_str()));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVMBC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString().c_str();
+
+ if (II.isPipe())
+ CmdArgs.push_back("-");
+ else if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // FIXME: GCC passes on -lgcc, -lgcc_pic and a whole lot of
+ // rpaths
+ CmdArgs.push_back("-L/usr/lib/gcc41");
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib/gcc41");
+
+ CmdArgs.push_back("-rpath-link");
+ CmdArgs.push_back("/usr/lib/gcc41");
+
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib");
+
+ CmdArgs.push_back("-rpath-link");
+ CmdArgs.push_back("/usr/lib");
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc_pic");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ }
+
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lthread_xu");
+
+ if (!Args.hasArg(options::OPT_nolibc)) {
+ CmdArgs.push_back("-lc");
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc_pic");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o").c_str()));
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o").c_str()));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o").c_str()));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "ld").c_str());
+ Dest.addCommand(new Command(Exec, CmdArgs));
+}
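Every ConstructJob above follows the same shape: push option-dependent strings onto an ArgStringList, resolve the tool binary through the toolchain, and hand a Command to the destination job. The standalone program below mirrors that argument-assembly pattern for the BSD-style link step; it is plain C++, not the clang driver API, and the Static/Shared flags, the output name, and the hard-coded dynamic-linker path are illustrative assumptions only.

// Standalone sketch, not the clang driver API: mimics how the link tools
// above build an ld command line from a few option flags.
#include <iostream>
#include <string>
#include <vector>

static void BuildLinkArgs(bool Static, bool Shared,
                          std::vector<std::string> &CmdArgs) {
  if (Static) {
    CmdArgs.push_back("-Bstatic");
  } else if (Shared) {
    CmdArgs.push_back("-Bshareable");
  } else {
    CmdArgs.push_back("-dynamic-linker");
    CmdArgs.push_back("/libexec/ld-elf.so.1"); // illustrative path
  }
  CmdArgs.push_back("-o");
  CmdArgs.push_back("a.out");
}

int main() {
  std::vector<std::string> CmdArgs;
  BuildLinkArgs(/*Static=*/false, /*Shared=*/false, CmdArgs);

  // Print the command line the driver would hand off to "ld".
  std::cout << "ld";
  for (std::vector<std::string>::const_iterator I = CmdArgs.begin(),
         E = CmdArgs.end(); I != E; ++I)
    std::cout << ' ' << *I;
  std::cout << '\n';
  return 0;
}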
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
new file mode 100644
index 0000000..db108db
--- /dev/null
+++ b/lib/Driver/Tools.h
@@ -0,0 +1,316 @@
+//===--- Tools.h - Tool Implementations -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_TOOLS_H_
+#define CLANG_LIB_DRIVER_TOOLS_H_
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/Types.h"
+#include "clang/Driver/Util.h"
+
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace driver {
+ class Driver;
+
+namespace toolchains {
+ class Darwin_X86;
+}
+
+namespace tools {
+
+ class VISIBILITY_HIDDEN Clang : public Tool {
+ void AddPreprocessingOptions(const Driver &D,
+ const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs) const;
+
+ public:
+ Clang(const ToolChain &TC) : Tool("clang", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ /// gcc - Generic GCC tool implementations.
+namespace gcc {
+ class VISIBILITY_HIDDEN Common : public Tool {
+ public:
+ Common(const char *Name, const ToolChain &TC) : Tool(Name, TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+
+ /// RenderExtraToolArgs - Render any arguments necessary to force
+ /// the particular tool mode.
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const = 0;
+ };
+
+
+ class VISIBILITY_HIDDEN Preprocess : public Common {
+ public:
+ Preprocess(const ToolChain &TC) : Common("gcc::Preprocess", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const;
+ };
+
+ class VISIBILITY_HIDDEN Precompile : public Common {
+ public:
+ Precompile(const ToolChain &TC) : Common("gcc::Precompile", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const;
+ };
+
+ class VISIBILITY_HIDDEN Compile : public Common {
+ public:
+ Compile(const ToolChain &TC) : Common("gcc::Compile", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const;
+ };
+
+ class VISIBILITY_HIDDEN Assemble : public Common {
+ public:
+ Assemble(const ToolChain &TC) : Common("gcc::Assemble", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const;
+ };
+
+ class VISIBILITY_HIDDEN Link : public Common {
+ public:
+ Link(const ToolChain &TC) : Common("gcc::Link", TC) {}
+
+ virtual bool acceptsPipedInput() const { return false; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(ArgStringList &CmdArgs) const;
+ };
+} // end namespace gcc
+
+namespace darwin {
+ class VISIBILITY_HIDDEN CC1 : public Tool {
+ public:
+ static const char *getBaseInputName(const ArgList &Args,
+ const InputInfoList &Input);
+ static const char *getBaseInputStem(const ArgList &Args,
+ const InputInfoList &Input);
+ static const char *getDependencyFileName(const ArgList &Args,
+ const InputInfoList &Inputs);
+
+ protected:
+ const char *getCC1Name(types::ID Type) const;
+
+ void AddCC1Args(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const;
+ void AddCPPOptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const;
+ void AddCPPUniqueOptionsArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const;
+ void AddCPPArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+
+ public:
+ CC1(const char *Name, const ToolChain &TC) : Tool(Name, TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+ };
+
+ class VISIBILITY_HIDDEN Preprocess : public CC1 {
+ public:
+ Preprocess(const ToolChain &TC) : CC1("darwin::Preprocess", TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class VISIBILITY_HIDDEN Compile : public CC1 {
+ public:
+ Compile(const ToolChain &TC) : CC1("darwin::Compile", TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class VISIBILITY_HIDDEN Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("darwin::Assemble", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class VISIBILITY_HIDDEN Link : public Tool {
+ void AddDarwinArch(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddDarwinSubArch(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddLinkArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+
+ /// The default macosx-version-min.
+ const char *MacosxVersionMin;
+
+ const toolchains::Darwin_X86 &getDarwinToolChain() const;
+
+ public:
+ Link(const ToolChain &TC,
+ const char *_MacosxVersionMin)
+ : Tool("darwin::Link", TC), MacosxVersionMin(_MacosxVersionMin) {
+ }
+
+ virtual bool acceptsPipedInput() const { return false; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class VISIBILITY_HIDDEN Lipo : public Tool {
+ public:
+ Lipo(const ToolChain &TC) : Tool("darwin::Lipo", TC) {}
+
+ virtual bool acceptsPipedInput() const { return false; }
+ virtual bool canPipeOutput() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+}
+
+ /// freebsd -- Directly call GNU Binutils assembler and linker
+namespace freebsd {
+ class VISIBILITY_HIDDEN Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class VISIBILITY_HIDDEN Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("freebsd::Link", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+}
+
+ /// dragonfly -- Directly call GNU Binutils assembler and linker
+namespace dragonfly {
+ class VISIBILITY_HIDDEN Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class VISIBILITY_HIDDEN Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("dragonfly::Link", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+}
+
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif
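The per-target namespaces declared above all repeat one pattern: a Tool subclass that answers the three pipe/integrated-CPP queries and overrides ConstructJob. A hypothetical additional target would slot into this header the same way; in the fragment below the mytarget name is an assumption, not part of this patch, and the declaration is meant to sit next to the freebsd and dragonfly namespaces inside the tools namespace.

  /// mytarget -- hypothetical example of wiring up another assembler tool
namespace mytarget {
  class VISIBILITY_HIDDEN Assemble : public Tool {
  public:
    Assemble(const ToolChain &TC) : Tool("mytarget::Assemble", TC) {}

    virtual bool acceptsPipedInput() const { return true; }
    virtual bool canPipeOutput() const { return true; }
    virtual bool hasIntegratedCPP() const { return false; }

    virtual void ConstructJob(Compilation &C, const JobAction &JA,
                              Job &Dest, const InputInfo &Output,
                              const InputInfoList &Inputs,
                              const ArgList &TCArgs,
                              const char *LinkingOutput) const;
  };
} // end namespace mytarget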
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
new file mode 100644
index 0000000..e89e973
--- /dev/null
+++ b/lib/Driver/Types.cpp
@@ -0,0 +1,205 @@
+//===--- Types.cpp - Driver input & temporary type information -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Types.h"
+
+#include <string.h>
+#include <cassert>
+
+using namespace clang::driver;
+using namespace clang::driver::types;
+
+struct Info {
+ const char *Name;
+ const char *Flags;
+ const char *TempSuffix;
+ ID PreprocessedType;
+};
+
+static Info TypeInfos[] = {
+#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, FLAGS) \
+ { NAME, FLAGS, TEMP_SUFFIX, TY_##PP_TYPE, },
+#include "clang/Driver/Types.def"
+#undef TYPE
+};
+static const unsigned numTypes = sizeof(TypeInfos) / sizeof(TypeInfos[0]);
+
+static Info &getInfo(unsigned id) {
+ assert(id > 0 && id - 1 < numTypes && "Invalid Type ID.");
+ return TypeInfos[id - 1];
+}
+
+const char *types::getTypeName(ID Id) {
+ return getInfo(Id).Name;
+}
+
+types::ID types::getPreprocessedType(ID Id) {
+ return getInfo(Id).PreprocessedType;
+}
+
+const char *types::getTypeTempSuffix(ID Id) {
+ return getInfo(Id).TempSuffix;
+}
+
+bool types::onlyAssembleType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'a');
+}
+
+bool types::onlyPrecompileType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'p');
+}
+
+bool types::canTypeBeUserSpecified(ID Id) {
+ return strchr(getInfo(Id).Flags, 'u');
+}
+
+bool types::appendSuffixForType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'A');
+}
+
+bool types::canLipoType(ID Id) {
+ return (Id == TY_Nothing ||
+ Id == TY_Image ||
+ Id == TY_Object);
+}
+
+bool types::isAcceptedByClang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_Asm:
+ case TY_C: case TY_PP_C:
+ case TY_ObjC: case TY_PP_ObjC:
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX:
+ case TY_CHeader: case TY_PP_CHeader:
+ case TY_ObjCHeader: case TY_PP_ObjCHeader:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ return true;
+ }
+}
+
+bool types::isCXX(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ return true;
+ }
+}
+
+types::ID types::lookupTypeForExtension(const char *Ext) {
+ unsigned N = strlen(Ext);
+
+ switch (N) {
+ case 1:
+ if (memcmp(Ext, "c", 1) == 0) return TY_C;
+ if (memcmp(Ext, "i", 1) == 0) return TY_PP_C;
+ if (memcmp(Ext, "m", 1) == 0) return TY_ObjC;
+ if (memcmp(Ext, "M", 1) == 0) return TY_ObjCXX;
+ if (memcmp(Ext, "h", 1) == 0) return TY_CHeader;
+ if (memcmp(Ext, "C", 1) == 0) return TY_CXX;
+ if (memcmp(Ext, "H", 1) == 0) return TY_CXXHeader;
+ if (memcmp(Ext, "f", 1) == 0) return TY_PP_Fortran;
+ if (memcmp(Ext, "F", 1) == 0) return TY_Fortran;
+ if (memcmp(Ext, "s", 1) == 0) return TY_PP_Asm;
+ if (memcmp(Ext, "S", 1) == 0) return TY_Asm;
+ break;
+ case 2:
+ if (memcmp(Ext, "ii", 2) == 0) return TY_PP_CXX;
+ if (memcmp(Ext, "mi", 2) == 0) return TY_PP_ObjC;
+ if (memcmp(Ext, "mm", 2) == 0) return TY_ObjCXX;
+ if (memcmp(Ext, "cc", 2) == 0) return TY_CXX;
+ if (memcmp(Ext, "cp", 2) == 0) return TY_CXX;
+ if (memcmp(Ext, "hh", 2) == 0) return TY_CXXHeader;
+ break;
+ case 3:
+ if (memcmp(Ext, "ads", 3) == 0) return TY_Ada;
+ if (memcmp(Ext, "adb", 3) == 0) return TY_Ada;
+ if (memcmp(Ext, "cxx", 3) == 0) return TY_CXX;
+ if (memcmp(Ext, "cpp", 3) == 0) return TY_CXX;
+ if (memcmp(Ext, "CPP", 3) == 0) return TY_CXX;
+ if (memcmp(Ext, "cXX", 3) == 0) return TY_CXX;
+ if (memcmp(Ext, "for", 3) == 0) return TY_PP_Fortran;
+ if (memcmp(Ext, "FOR", 3) == 0) return TY_PP_Fortran;
+ if (memcmp(Ext, "fpp", 3) == 0) return TY_Fortran;
+ if (memcmp(Ext, "FPP", 3) == 0) return TY_Fortran;
+ if (memcmp(Ext, "f90", 3) == 0) return TY_PP_Fortran;
+ if (memcmp(Ext, "f95", 3) == 0) return TY_PP_Fortran;
+ if (memcmp(Ext, "F90", 3) == 0) return TY_Fortran;
+ if (memcmp(Ext, "F95", 3) == 0) return TY_Fortran;
+ if (memcmp(Ext, "mii", 3) == 0) return TY_PP_ObjCXX;
+ break;
+ }
+
+ return TY_INVALID;
+}
+
+types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
+ unsigned N = strlen(Name);
+
+ for (unsigned i=0; i<numTypes; ++i) {
+ types::ID Id = (types::ID) (i + 1);
+ if (canTypeBeUserSpecified(Id) &&
+ memcmp(Name, getInfo(Id).Name, N + 1) == 0)
+ return Id;
+ }
+
+ return TY_INVALID;
+}
+
+// FIXME: Why don't we just put this list in the defs file, eh.
+
+unsigned types::getNumCompilationPhases(ID Id) {
+ if (Id == TY_Object)
+ return 1;
+
+ unsigned N = 0;
+ if (getPreprocessedType(Id) != TY_INVALID)
+ N += 1;
+
+ if (onlyAssembleType(Id))
+ return N + 2; // assemble, link
+ if (onlyPrecompileType(Id))
+ return N + 1; // precompile
+
+ return N + 3; // compile, assemble, link
+}
+
+phases::ID types::getCompilationPhase(ID Id, unsigned N) {
+ assert(N < getNumCompilationPhases(Id) && "Invalid index.");
+
+ if (Id == TY_Object)
+ return phases::Link;
+
+ if (getPreprocessedType(Id) != TY_INVALID) {
+ if (N == 0)
+ return phases::Preprocess;
+ --N;
+ }
+
+ if (onlyAssembleType(Id))
+ return N == 0 ? phases::Assemble : phases::Link;
+
+ if (onlyPrecompileType(Id))
+ return phases::Precompile;
+
+ if (N == 0)
+ return phases::Compile;
+ if (N == 1)
+ return phases::Assemble;
+
+ return phases::Link;
+}
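Together, getNumCompilationPhases and getCompilationPhase let a caller walk the driver phases for an input type in order. The usage sketch below assumes these helpers are declared in clang/Driver/Types.h and that phases::ID lives in clang/Driver/Phases.h (both header paths are assumptions):

#include "clang/Driver/Phases.h"   // assumed location of phases::ID
#include "clang/Driver/Types.h"    // assumed declarations of the helpers above

using namespace clang::driver;

// For a plain C source file this visits Preprocess, Compile, Assemble, Link
// in that order.
void listPhasesForC() {
  types::ID Id = types::TY_C;
  for (unsigned i = 0, e = types::getNumCompilationPhases(Id); i != e; ++i) {
    phases::ID P = types::getCompilationPhase(Id, i);
    (void) P; // a real caller would build the matching JobAction here
  }
}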
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
new file mode 100644
index 0000000..11c9251
--- /dev/null
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -0,0 +1,451 @@
+//===--- ASTConsumers.cpp - ASTConsumer implementations -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumer Implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/DocumentXML.h"
+#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "llvm/Module.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+/// ASTPrinter - Pretty-printer and dumper of ASTs
+
+namespace {
+ class ASTPrinter : public ASTConsumer {
+ llvm::raw_ostream &Out;
+ bool Dump;
+
+ public:
+ ASTPrinter(llvm::raw_ostream* o = NULL, bool Dump = false)
+ : Out(o? *o : llvm::errs()), Dump(Dump) { }
+
+ virtual void HandleTranslationUnit(ASTContext &Context) {
+ PrintingPolicy Policy = Context.PrintingPolicy;
+ Policy.Dump = Dump;
+ Context.getTranslationUnitDecl()->print(Out, Context, Policy);
+ }
+ };
+} // end anonymous namespace
+
+ASTConsumer *clang::CreateASTPrinter(llvm::raw_ostream* out) {
+ return new ASTPrinter(out);
+}
+
+//===----------------------------------------------------------------------===//
+/// ASTPrinterXML - XML-printer of ASTs
+
+namespace {
+ class ASTPrinterXML : public ASTConsumer {
+ DocumentXML Doc;
+
+ public:
+ ASTPrinterXML(llvm::raw_ostream& o) : Doc("CLANG_XML", o) {}
+
+ void Initialize(ASTContext &Context) {
+ Doc.initialize(Context);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ Doc.addSubNode("TranslationUnit");
+ for (DeclContext::decl_iterator
+ D = Ctx.getTranslationUnitDecl()->decls_begin(Ctx),
+ DEnd = Ctx.getTranslationUnitDecl()->decls_end(Ctx);
+ D != DEnd;
+ ++D)
+ {
+ Doc.PrintDecl(*D);
+ }
+ Doc.toParent();
+ Doc.finalize();
+ }
+ };
+} // end anonymous namespace
+
+
+ASTConsumer *clang::CreateASTPrinterXML(llvm::raw_ostream* out) {
+ return new ASTPrinterXML(out ? *out : llvm::outs());
+}
+
+ASTConsumer *clang::CreateASTDumper() {
+ return new ASTPrinter(0, true);
+}
+
+//===----------------------------------------------------------------------===//
+/// ASTViewer - AST Visualization
+
+namespace {
+ class ASTViewer : public ASTConsumer {
+ ASTContext *Context;
+ public:
+ void Initialize(ASTContext &Context) {
+ this->Context = &Context;
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+ };
+}
+
+void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->print(llvm::errs(), *Context);
+
+ if (FD->getBodyIfAvailable()) {
+ llvm::cerr << '\n';
+ FD->getBodyIfAvailable()->viewAST();
+ llvm::cerr << '\n';
+ }
+ return;
+ }
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ MD->print(llvm::errs(), *Context);
+
+ if (MD->getBody()) {
+ llvm::cerr << '\n';
+ MD->getBody()->viewAST();
+ llvm::cerr << '\n';
+ }
+ }
+}
+
+
+ASTConsumer *clang::CreateASTViewer() { return new ASTViewer(); }
+
+//===----------------------------------------------------------------------===//
+/// DeclContextPrinter - Decl and DeclContext Visualization
+
+namespace {
+
+class DeclContextPrinter : public ASTConsumer {
+ llvm::raw_ostream& Out;
+public:
+ DeclContextPrinter() : Out(llvm::errs()) {}
+
+ void HandleTranslationUnit(ASTContext &C) {
+ PrintDeclContext(C.getTranslationUnitDecl(), 4);
+ }
+
+ void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
+};
+} // end anonymous namespace
+
+void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
+ unsigned Indentation) {
+ // Print DeclContext name.
+ switch (DC->getDeclKind()) {
+ case Decl::TranslationUnit:
+ Out << "[translation unit] " << DC;
+ break;
+ case Decl::Namespace: {
+ Out << "[namespace] ";
+ const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
+ Out << ND->getNameAsString();
+ break;
+ }
+ case Decl::Enum: {
+ const EnumDecl* ED = cast<EnumDecl>(DC);
+ if (ED->isDefinition())
+ Out << "[enum] ";
+ else
+ Out << "<enum> ";
+ Out << ED->getNameAsString();
+ break;
+ }
+ case Decl::Record: {
+ const RecordDecl* RD = cast<RecordDecl>(DC);
+ if (RD->isDefinition())
+ Out << "[struct] ";
+ else
+ Out << "<struct> ";
+ Out << RD->getNameAsString();
+ break;
+ }
+ case Decl::CXXRecord: {
+ const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
+ if (RD->isDefinition())
+ Out << "[class] ";
+ else
+ Out << "<class> ";
+ Out << RD->getNameAsString() << " " << DC;
+ break;
+ }
+ case Decl::ObjCMethod:
+ Out << "[objc method]";
+ break;
+ case Decl::ObjCInterface:
+ Out << "[objc interface]";
+ break;
+ case Decl::ObjCCategory:
+ Out << "[objc category]";
+ break;
+ case Decl::ObjCProtocol:
+ Out << "[objc protocol]";
+ break;
+ case Decl::ObjCImplementation:
+ Out << "[objc implementation]";
+ break;
+ case Decl::ObjCCategoryImpl:
+ Out << "[objc categoryimpl]";
+ break;
+ case Decl::LinkageSpec:
+ Out << "[linkage spec]";
+ break;
+ case Decl::Block:
+ Out << "[block]";
+ break;
+ case Decl::Function: {
+ const FunctionDecl* FD = cast<FunctionDecl>(DC);
+ if (FD->isThisDeclarationADefinition())
+ Out << "[function] ";
+ else
+ Out << "<function> ";
+ Out << FD->getNameAsString();
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = FD->param_begin(),
+ E = FD->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << (*I)->getNameAsString();
+ }
+ Out << ")";
+ break;
+ }
+ case Decl::CXXMethod: {
+ const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
+ if (D->isOutOfLineDefinition())
+ Out << "[c++ method] ";
+ else if (D->isImplicit())
+ Out << "(c++ method) ";
+ else
+ Out << "<c++ method> ";
+ Out << D->getNameAsString();
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << (*I)->getNameAsString();
+ }
+ Out << ")";
+
+ // Check the semantic DeclContext.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+
+ break;
+ }
+ case Decl::CXXConstructor: {
+ const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
+ if (D->isOutOfLineDefinition())
+ Out << "[c++ ctor] ";
+ else if (D->isImplicit())
+ Out << "(c++ ctor) ";
+ else
+ Out << "<c++ ctor> ";
+ Out << D->getNameAsString();
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << (*I)->getNameAsString();
+ }
+ Out << ")";
+
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXDestructor: {
+ const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
+ if (D->isOutOfLineDefinition())
+ Out << "[c++ dtor] ";
+ else if (D->isImplicit())
+ Out << "(c++ dtor) ";
+ else
+ Out << "<c++ dtor> ";
+ Out << D->getNameAsString();
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXConversion: {
+ const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
+ if (D->isOutOfLineDefinition())
+ Out << "[c++ conversion] ";
+ else if (D->isImplicit())
+ Out << "(c++ conversion) ";
+ else
+ Out << "<c++ conversion> ";
+ Out << D->getNameAsString();
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+
+ default:
+ assert(0 && "a decl that inherits DeclContext isn't handled");
+ }
+
+ Out << "\n";
+
+ // Print decls in the DeclContext.
+ // FIXME: Should not use a NULL DeclContext!
+ ASTContext *Context = 0;
+ for (DeclContext::decl_iterator I = DC->decls_begin(*Context),
+ E = DC->decls_end(*Context);
+ I != E; ++I) {
+ for (unsigned i = 0; i < Indentation; ++i)
+ Out << " ";
+
+ Decl::Kind DK = I->getKind();
+ switch (DK) {
+ case Decl::Namespace:
+ case Decl::Enum:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::ObjCMethod:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ {
+ DeclContext* DC = cast<DeclContext>(*I);
+ PrintDeclContext(DC, Indentation+2);
+ break;
+ }
+ case Decl::Field: {
+ FieldDecl* FD = cast<FieldDecl>(*I);
+ Out << "<field> " << FD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::Typedef: {
+ TypedefDecl* TD = cast<TypedefDecl>(*I);
+ Out << "<typedef> " << TD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::EnumConstant: {
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(*I);
+ Out << "<enum constant> " << ECD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::Var: {
+ VarDecl* VD = cast<VarDecl>(*I);
+ Out << "<var> " << VD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::ImplicitParam: {
+ ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(*I);
+ Out << "<implicit parameter> " << IPD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::ParmVar: {
+ ParmVarDecl* PVD = cast<ParmVarDecl>(*I);
+ Out << "<parameter> " << PVD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::OriginalParmVar: {
+ OriginalParmVarDecl* OPVD = cast<OriginalParmVarDecl>(*I);
+ Out << "<original parameter> " << OPVD->getNameAsString() << "\n";
+ break;
+ }
+ case Decl::ObjCProperty: {
+ ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(*I);
+ Out << "<objc property> " << OPD->getNameAsString() << "\n";
+ break;
+ }
+ default:
+ fprintf(stderr, "DeclKind: %d \"%s\"\n", DK, I->getDeclKindName());
+ assert(0 && "decl unhandled");
+ }
+ }
+}
+ASTConsumer *clang::CreateDeclContextPrinter() {
+ return new DeclContextPrinter();
+}
+
+//===----------------------------------------------------------------------===//
+/// InheritanceViewer - C++ Inheritance Visualization
+
+namespace {
+class InheritanceViewer : public ASTConsumer {
+ const std::string clsname;
+public:
+ InheritanceViewer(const std::string& cname) : clsname(cname) {}
+
+ void HandleTranslationUnit(ASTContext &C) {
+ for (ASTContext::type_iterator I=C.types_begin(),E=C.types_end(); I!=E; ++I)
+ if (RecordType *T = dyn_cast<RecordType>(*I)) {
+ if (CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(T->getDecl())) {
+ // FIXME: This lookup needs to be generalized to handle namespaces and
+ // (when we support them) templates.
+ if (D->getNameAsString() == clsname) {
+ D->viewInheritance(C);
+ }
+ }
+ }
+ }
+};
+}
+
+ASTConsumer *clang::CreateInheritanceViewer(const std::string& clsname) {
+ return new InheritanceViewer(clsname);
+}
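Each factory above hands back an ASTConsumer that the frontend drives through Initialize, HandleTopLevelDecl, and HandleTranslationUnit; which factory is used is simply a function of the requested action. A minimal selection sketch (the makePrintingConsumer wrapper is hypothetical; only the Create* factories come from this file):

#include "clang/Frontend/ASTConsumers.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: choose between the pretty-printer and the dumper.
clang::ASTConsumer *makePrintingConsumer(bool DumpMode) {
  if (DumpMode)
    return clang::CreateASTDumper();              // dumps to llvm::errs()
  return clang::CreateASTPrinter(&llvm::outs());  // pretty-prints to stdout
}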
diff --git a/lib/Frontend/AnalysisConsumer.cpp b/lib/Frontend/AnalysisConsumer.cpp
new file mode 100644
index 0000000..ae90594
--- /dev/null
+++ b/lib/Frontend/AnalysisConsumer.cpp
@@ -0,0 +1,659 @@
+//===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// "Meta" ASTConsumer for running different source analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/AnalysisConsumer.h"
+#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Frontend/ManagerRegistry.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "clang/AST/CFG.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/PathSensitive/BugReporter.h"
+#include "clang/Analysis/LocalCheckers.h"
+#include "clang/Analysis/PathSensitive/GRTransferFuncs.h"
+#include "clang/Analysis/PathSensitive/GRExprEngine.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+#include "llvm/System/Program.h"
+
+using namespace clang;
+
+static ExplodedNodeImpl::Auditor* CreateUbiViz();
+
+//===----------------------------------------------------------------------===//
+// Basic type definitions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class AnalysisManager;
+ typedef void (*CodeAction)(AnalysisManager& Mgr);
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer declaration.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+ class VISIBILITY_HIDDEN AnalysisConsumer : public ASTConsumer {
+ typedef std::vector<CodeAction> Actions;
+ Actions FunctionActions;
+ Actions ObjCMethodActions;
+ Actions ObjCImplementationActions;
+ Actions TranslationUnitActions;
+
+ public:
+ const LangOptions& LOpts;
+ Diagnostic &Diags;
+ ASTContext* Ctx;
+ Preprocessor* PP;
+ PreprocessorFactory* PPF;
+ const std::string OutDir;
+ AnalyzerOptions Opts;
+ llvm::OwningPtr<PathDiagnosticClient> PD;
+
+ AnalysisConsumer(Diagnostic &diags, Preprocessor* pp,
+ PreprocessorFactory* ppf,
+ const LangOptions& lopts,
+ const std::string& outdir,
+ const AnalyzerOptions& opts)
+ : LOpts(lopts), Diags(diags),
+ Ctx(0), PP(pp), PPF(ppf),
+ OutDir(outdir), Opts(opts) {}
+
+ void addCodeAction(CodeAction action) {
+ FunctionActions.push_back(action);
+ ObjCMethodActions.push_back(action);
+ }
+
+ void addObjCImplementationAction(CodeAction action) {
+ ObjCImplementationActions.push_back(action);
+ }
+
+ void addTranslationUnitAction(CodeAction action) {
+ TranslationUnitActions.push_back(action);
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ void HandleCode(Decl* D, Stmt* Body, Actions& actions);
+ };
+
+
+ class VISIBILITY_HIDDEN AnalysisManager : public BugReporterData {
+ Decl* D; Stmt* Body;
+
+ enum AnalysisScope { ScopeTU, ScopeDecl } AScope;
+
+ AnalysisConsumer& C;
+ bool DisplayedFunction;
+
+ llvm::OwningPtr<CFG> cfg;
+ llvm::OwningPtr<LiveVariables> liveness;
+ llvm::OwningPtr<ParentMap> PM;
+
+ // Configurable component creators.
+ StoreManagerCreator CreateStoreMgr;
+ ConstraintManagerCreator CreateConstraintMgr;
+
+ public:
+ AnalysisManager(AnalysisConsumer& c, Decl* d, Stmt* b, bool displayProgress)
+ : D(d), Body(b), AScope(ScopeDecl), C(c),
+ DisplayedFunction(!displayProgress) {
+ setManagerCreators();
+ }
+
+ AnalysisManager(AnalysisConsumer& c, bool displayProgress)
+ : D(0), Body(0), AScope(ScopeTU), C(c),
+ DisplayedFunction(!displayProgress) {
+ setManagerCreators();
+ }
+
+ Decl* getCodeDecl() const {
+ assert (AScope == ScopeDecl);
+ return D;
+ }
+
+ Stmt* getBody() const {
+ assert (AScope == ScopeDecl);
+ return Body;
+ }
+
+ StoreManagerCreator getStoreManagerCreator() {
+ return CreateStoreMgr;
+ };
+
+ ConstraintManagerCreator getConstraintManagerCreator() {
+ return CreateConstraintMgr;
+ }
+
+ virtual CFG* getCFG() {
+ if (!cfg) cfg.reset(CFG::buildCFG(getBody()));
+ return cfg.get();
+ }
+
+ virtual ParentMap& getParentMap() {
+ if (!PM)
+ PM.reset(new ParentMap(getBody()));
+ return *PM.get();
+ }
+
+ virtual ASTContext& getContext() {
+ return *C.Ctx;
+ }
+
+ virtual SourceManager& getSourceManager() {
+ return getContext().getSourceManager();
+ }
+
+ virtual Diagnostic& getDiagnostic() {
+ return C.Diags;
+ }
+
+ const LangOptions& getLangOptions() const {
+ return C.LOpts;
+ }
+
+ virtual PathDiagnosticClient* getPathDiagnosticClient() {
+ if (C.PD.get() == 0 && !C.OutDir.empty()) {
+ switch (C.Opts.AnalysisDiagOpt) {
+ default:
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE)\
+case PD_##NAME: C.PD.reset(CREATEFN(C.OutDir, C.PP, C.PPF)); break;
+#include "clang/Frontend/Analyses.def"
+ }
+ }
+ return C.PD.get();
+ }
+
+ virtual LiveVariables* getLiveVariables() {
+ if (!liveness) {
+ CFG* c = getCFG();
+ if (!c) return 0;
+
+ liveness.reset(new LiveVariables(getContext(), *c));
+ liveness->runOnCFG(*c);
+ liveness->runOnAllBlocks(*c, 0, true);
+ }
+
+ return liveness.get();
+ }
+
+ bool shouldVisualizeGraphviz() const { return C.Opts.VisualizeEGDot; }
+
+ bool shouldVisualizeUbigraph() const { return C.Opts.VisualizeEGUbi; }
+
+ bool shouldVisualize() const {
+ return C.Opts.VisualizeEGDot || C.Opts.VisualizeEGUbi;
+ }
+
+ bool shouldTrimGraph() const { return C.Opts.TrimGraph; }
+
+ bool shouldPurgeDead() const { return C.Opts.PurgeDead; }
+
+ bool shouldEagerlyAssume() const { return C.Opts.EagerlyAssume; }
+
+ void DisplayFunction() {
+
+ if (DisplayedFunction)
+ return;
+
+ DisplayedFunction = true;
+
+ // FIXME: Is getCodeDecl() always a named decl?
+ if (isa<FunctionDecl>(getCodeDecl()) ||
+ isa<ObjCMethodDecl>(getCodeDecl())) {
+ NamedDecl *ND = cast<NamedDecl>(getCodeDecl());
+ SourceManager &SM = getContext().getSourceManager();
+ llvm::cerr << "ANALYZE: "
+ << SM.getPresumedLoc(ND->getLocation()).getFilename()
+ << ' ' << ND->getNameAsString() << '\n';
+ }
+ }
+
+ private:
+ /// Set the configurable analyzer component creators. First check whether
+ /// any components were registered at runtime; otherwise fall back to the
+ /// builtin components.
+ void setManagerCreators() {
+ if (ManagerRegistry::StoreMgrCreator != 0) {
+ CreateStoreMgr = ManagerRegistry::StoreMgrCreator;
+ }
+ else {
+ switch (C.Opts.AnalysisStoreOpt) {
+ default:
+ assert(0 && "Unknown store manager.");
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateStoreMgr = CREATEFN; break;
+#include "clang/Frontend/Analyses.def"
+ }
+ }
+
+ if (ManagerRegistry::ConstraintMgrCreator != 0)
+ CreateConstraintMgr = ManagerRegistry::ConstraintMgrCreator;
+ else {
+ switch (C.Opts.AnalysisConstraintsOpt) {
+ default:
+ assert(0 && "Unknown constraint manager.");
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateConstraintMgr = CREATEFN; break;
+#include "clang/Frontend/Analyses.def"
+ }
+ }
+
+
+ // Some DiagnosticClients should be created all the time instead of
+ // lazily. Create those now.
+ switch (C.Opts.AnalysisDiagOpt) {
+ default: break;
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE)\
+case PD_##NAME: if (AUTOCREATE) getPathDiagnosticClient(); break;
+#include "clang/Frontend/Analyses.def"
+ }
+ }
+
+ };
+
+} // end anonymous namespace
+
+namespace llvm {
+ template <> struct FoldingSetTrait<CodeAction> {
+ static inline void Profile(CodeAction X, FoldingSetNodeID& ID) {
+ ID.AddPointer(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(X)));
+ }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer implementation.
+//===----------------------------------------------------------------------===//
+
+void AnalysisConsumer::HandleTopLevelSingleDecl(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl* FD = cast<FunctionDecl>(D);
+
+ if (Opts.AnalyzeSpecificFunction.size() > 0 &&
+ Opts.AnalyzeSpecificFunction != FD->getIdentifier()->getName())
+ break;
+
+ Stmt* Body = FD->getBody(*Ctx);
+ if (Body) HandleCode(FD, Body, FunctionActions);
+ break;
+ }
+
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl* MD = cast<ObjCMethodDecl>(D);
+
+ if (Opts.AnalyzeSpecificFunction.size() > 0 &&
+ Opts.AnalyzeSpecificFunction != MD->getSelector().getAsString())
+ return;
+
+ Stmt* Body = MD->getBody();
+ if (Body) HandleCode(MD, Body, ObjCMethodActions);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+
+ if (!TranslationUnitActions.empty()) {
+ AnalysisManager mgr(*this, Opts.AnalyzerDisplayProgress);
+ for (Actions::iterator I = TranslationUnitActions.begin(),
+ E = TranslationUnitActions.end(); I != E; ++I)
+ (*I)(mgr);
+ }
+
+ if (!ObjCImplementationActions.empty()) {
+ TranslationUnitDecl *TUD = C.getTranslationUnitDecl();
+
+ for (DeclContext::decl_iterator I = TUD->decls_begin(C),
+ E = TUD->decls_end(C);
+ I != E; ++I)
+ if (ObjCImplementationDecl* ID = dyn_cast<ObjCImplementationDecl>(*I))
+ HandleCode(ID, 0, ObjCImplementationActions);
+ }
+
+ // Delete the PathDiagnosticClient here in case the AnalysisConsumer object
+ // never gets released, so that any side effects in the destructor of the
+ // PathDiagnosticClient run now.
+ PD.reset();
+}
+
+void AnalysisConsumer::HandleCode(Decl* D, Stmt* Body, Actions& actions) {
+
+ // Don't run the actions if an error has occurred while parsing the file.
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Don't run the actions on declarations in header files unless
+ // otherwise specified.
+ if (!Opts.AnalyzeAll &&
+ !Ctx->getSourceManager().isFromMainFile(D->getLocation()))
+ return;
+
+ // Create an AnalysisManager that will manage the state for analyzing
+ // this method/function.
+ AnalysisManager mgr(*this, D, Body, Opts.AnalyzerDisplayProgress);
+
+ // Dispatch on the actions.
+ for (Actions::iterator I = actions.begin(), E = actions.end(); I != E; ++I)
+ (*I)(mgr);
+}
+
+//===----------------------------------------------------------------------===//
+// Analyses
+//===----------------------------------------------------------------------===//
+
+static void ActionWarnDeadStores(AnalysisManager& mgr) {
+ if (LiveVariables* L = mgr.getLiveVariables()) {
+ BugReporter BR(mgr);
+ CheckDeadStores(*L, BR);
+ }
+}
+
+static void ActionWarnUninitVals(AnalysisManager& mgr) {
+ if (CFG* c = mgr.getCFG())
+ CheckUninitializedValues(*c, mgr.getContext(), mgr.getDiagnostic());
+}
+
+
+static void ActionGRExprEngine(AnalysisManager& mgr, GRTransferFuncs* tf,
+ bool StandardWarnings = true) {
+
+
+ llvm::OwningPtr<GRTransferFuncs> TF(tf);
+
+ // Display progress.
+ mgr.DisplayFunction();
+
+ // Construct the analysis engine.
+ LiveVariables* L = mgr.getLiveVariables();
+ if (!L) return;
+
+ GRExprEngine Eng(*mgr.getCFG(), *mgr.getCodeDecl(), mgr.getContext(), *L, mgr,
+ mgr.shouldPurgeDead(), mgr.shouldEagerlyAssume(),
+ mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator());
+
+ Eng.setTransferFunctions(tf);
+
+ if (StandardWarnings) {
+ Eng.RegisterInternalChecks();
+ RegisterAppleChecks(Eng);
+ }
+
+ // Set the graph auditor.
+ llvm::OwningPtr<ExplodedNodeImpl::Auditor> Auditor;
+ if (mgr.shouldVisualizeUbigraph()) {
+ Auditor.reset(CreateUbiViz());
+ ExplodedNodeImpl::SetAuditor(Auditor.get());
+ }
+
+ // Execute the worklist algorithm.
+ Eng.ExecuteWorkList();
+
+ // Release the auditor (if any) so that it doesn't monitor the graph
+ // created by the BugReporter.
+ ExplodedNodeImpl::SetAuditor(0);
+
+ // Visualize the exploded graph.
+ if (mgr.shouldVisualizeGraphviz())
+ Eng.ViewGraph(mgr.shouldTrimGraph());
+
+ // Display warnings.
+ Eng.getBugReporter().FlushReports();
+}
+
+static void ActionCheckerCFRefAux(AnalysisManager& mgr, bool GCEnabled,
+ bool StandardWarnings) {
+
+ GRTransferFuncs* TF = MakeCFRefCountTF(mgr.getContext(),
+ GCEnabled,
+ mgr.getLangOptions());
+
+ ActionGRExprEngine(mgr, TF, StandardWarnings);
+}
+
+static void ActionCheckerCFRef(AnalysisManager& mgr) {
+
+ switch (mgr.getLangOptions().getGCMode()) {
+ default:
+ assert (false && "Invalid GC mode.");
+ case LangOptions::NonGC:
+ ActionCheckerCFRefAux(mgr, false, true);
+ break;
+
+ case LangOptions::GCOnly:
+ ActionCheckerCFRefAux(mgr, true, true);
+ break;
+
+ case LangOptions::HybridGC:
+ ActionCheckerCFRefAux(mgr, false, true);
+ ActionCheckerCFRefAux(mgr, true, false);
+ break;
+ }
+}
+
+static void ActionCheckerSimple(AnalysisManager& mgr) {
+ ActionGRExprEngine(mgr, MakeGRSimpleValsTF());
+}
+
+static void ActionDisplayLiveVariables(AnalysisManager& mgr) {
+ if (LiveVariables* L = mgr.getLiveVariables()) {
+ mgr.DisplayFunction();
+ L->dumpBlockLiveness(mgr.getSourceManager());
+ }
+}
+
+static void ActionCFGDump(AnalysisManager& mgr) {
+ if (CFG* c = mgr.getCFG()) {
+ mgr.DisplayFunction();
+ c->dump();
+ }
+}
+
+static void ActionCFGView(AnalysisManager& mgr) {
+ if (CFG* c = mgr.getCFG()) {
+ mgr.DisplayFunction();
+ c->viewCFG();
+ }
+}
+
+static void ActionWarnObjCDealloc(AnalysisManager& mgr) {
+ if (mgr.getLangOptions().getGCMode() == LangOptions::GCOnly)
+ return;
+
+ BugReporter BR(mgr);
+
+ CheckObjCDealloc(cast<ObjCImplementationDecl>(mgr.getCodeDecl()),
+ mgr.getLangOptions(), BR);
+}
+
+static void ActionWarnObjCUnusedIvars(AnalysisManager& mgr) {
+ BugReporter BR(mgr);
+ CheckObjCUnusedIvar(cast<ObjCImplementationDecl>(mgr.getCodeDecl()), BR);
+}
+
+static void ActionWarnObjCMethSigs(AnalysisManager& mgr) {
+ BugReporter BR(mgr);
+
+ CheckObjCInstMethSignature(cast<ObjCImplementationDecl>(mgr.getCodeDecl()),
+ BR);
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer creation.
+//===----------------------------------------------------------------------===//
+
+ASTConsumer* clang::CreateAnalysisConsumer(Diagnostic &diags, Preprocessor* pp,
+ PreprocessorFactory* ppf,
+ const LangOptions& lopts,
+ const std::string& OutDir,
+ const AnalyzerOptions& Opts) {
+
+ llvm::OwningPtr<AnalysisConsumer> C(new AnalysisConsumer(diags, pp, ppf,
+ lopts, OutDir,
+ Opts));
+
+ for (unsigned i = 0; i < Opts.AnalysisList.size(); ++i)
+ switch (Opts.AnalysisList[i]) {
+#define ANALYSIS(NAME, CMD, DESC, SCOPE)\
+ case NAME:\
+ C->add ## SCOPE ## Action(&Action ## NAME);\
+ break;
+#include "clang/Frontend/Analyses.def"
+ default: break;
+ }
+
+ // Last, disable the effects of '-Werror' when using the AnalysisConsumer.
+ diags.setWarningsAsErrors(false);
+
+ return C.take();
+}
+
+//===----------------------------------------------------------------------===//
+// Ubigraph Visualization. FIXME: Move to separate file.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class UbigraphViz : public ExplodedNodeImpl::Auditor {
+ llvm::OwningPtr<llvm::raw_ostream> Out;
+ llvm::sys::Path Dir, Filename;
+ unsigned Cntr;
+
+ typedef llvm::DenseMap<void*,unsigned> VMap;
+ VMap M;
+
+public:
+ UbigraphViz(llvm::raw_ostream* out, llvm::sys::Path& dir,
+ llvm::sys::Path& filename);
+
+ ~UbigraphViz();
+
+ virtual void AddEdge(ExplodedNodeImpl* Src, ExplodedNodeImpl* Dst);
+};
+
+} // end anonymous namespace
+
+static ExplodedNodeImpl::Auditor* CreateUbiViz() {
+ std::string ErrMsg;
+
+ llvm::sys::Path Dir = llvm::sys::Path::GetTemporaryDirectory(&ErrMsg);
+ if (!ErrMsg.empty())
+ return 0;
+
+ llvm::sys::Path Filename = Dir;
+ Filename.appendComponent("llvm_ubi");
+ Filename.makeUnique(true,&ErrMsg);
+
+ if (!ErrMsg.empty())
+ return 0;
+
+ llvm::cerr << "Writing '" << Filename << "'.\n";
+
+ llvm::OwningPtr<llvm::raw_fd_ostream> Stream;
+ std::string filename = Filename.toString();
+ Stream.reset(new llvm::raw_fd_ostream(filename.c_str(), false, ErrMsg));
+
+ if (!ErrMsg.empty())
+ return 0;
+
+ return new UbigraphViz(Stream.take(), Dir, Filename);
+}
+
+void UbigraphViz::AddEdge(ExplodedNodeImpl* Src, ExplodedNodeImpl* Dst) {
+
+ assert (Src != Dst && "Self-edges are not allowed.");
+
+ // Lookup the Src. If it is a new node, it's a root.
+ VMap::iterator SrcI= M.find(Src);
+ unsigned SrcID;
+
+ if (SrcI == M.end()) {
+ M[Src] = SrcID = Cntr++;
+ *Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
+ }
+ else
+ SrcID = SrcI->second;
+
+ // Lookup the Dst.
+ VMap::iterator DstI= M.find(Dst);
+ unsigned DstID;
+
+ if (DstI == M.end()) {
+ M[Dst] = DstID = Cntr++;
+ *Out << "('vertex', " << DstID << ")\n";
+ }
+ else {
+ // We have hit DstID before. Change its style to reflect a cache hit.
+ DstID = DstI->second;
+ *Out << "('change_vertex_style', " << DstID << ", 1)\n";
+ }
+
+ // Add the edge.
+ *Out << "('edge', " << SrcID << ", " << DstID
+ << ", ('arrow','true'), ('oriented', 'true'))\n";
+}
+
+UbigraphViz::UbigraphViz(llvm::raw_ostream* out, llvm::sys::Path& dir,
+ llvm::sys::Path& filename)
+ : Out(out), Dir(dir), Filename(filename), Cntr(0) {
+
+ *Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
+ *Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
+ " ('size', '1.5'))\n";
+}
+
+UbigraphViz::~UbigraphViz() {
+ Out.reset(0);
+ llvm::cerr << "Running 'ubiviz' program... ";
+ std::string ErrMsg;
+ llvm::sys::Path Ubiviz = llvm::sys::Program::FindProgramByName("ubiviz");
+ std::vector<const char*> args;
+ args.push_back(Ubiviz.c_str());
+ args.push_back(Filename.c_str());
+ args.push_back(0);
+
+ if (llvm::sys::Program::ExecuteAndWait(Ubiviz, &args[0],0,0,0,0,&ErrMsg)) {
+ llvm::cerr << "Error viewing graph: " << ErrMsg << "\n";
+ }
+
+ // Delete the directory.
+ Dir.eraseFromDisk(true);
+}
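The machinery above boils down to a table of function pointers: CreateAnalysisConsumer registers CodeActions, and HandleCode later invokes every registered action on an AnalysisManager. The standalone program below (plain C++, hypothetical names, not the clang API) restates that register-then-dispatch pattern:

// Register-then-dispatch sketch mirroring AnalysisConsumer's CodeAction lists.
#include <iostream>
#include <vector>

struct Manager { const char *Name; };          // stand-in for AnalysisManager
typedef void (*CodeAction)(Manager &M);

static void ActionHello(Manager &M) { std::cout << "hello " << M.Name << "\n"; }
static void ActionBye(Manager &M)   { std::cout << "bye "   << M.Name << "\n"; }

int main() {
  std::vector<CodeAction> Actions;             // analogous to FunctionActions
  Actions.push_back(&ActionHello);             // registration step
  Actions.push_back(&ActionBye);

  Manager M = { "main" };
  // Dispatch step, as AnalysisConsumer::HandleCode does for each decl.
  for (std::vector<CodeAction>::iterator I = Actions.begin(),
         E = Actions.end(); I != E; ++I)
    (*I)(M);
  return 0;
}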
diff --git a/lib/Frontend/Backend.cpp b/lib/Frontend/Backend.cpp
new file mode 100644
index 0000000..44aa3a8
--- /dev/null
+++ b/lib/Frontend/Backend.cpp
@@ -0,0 +1,415 @@
+//===--- Backend.cpp - Interface to LLVM backend technologies -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompileOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Module.h"
+#include "llvm/ModuleProvider.h"
+#include "llvm/PassManager.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/System/Path.h"
+#include "llvm/System/Program.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetMachineRegistry.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/IPO.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+ class VISIBILITY_HIDDEN BackendConsumer : public ASTConsumer {
+ BackendAction Action;
+ CompileOptions CompileOpts;
+ llvm::raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+ Timer CodeGenerationTime;
+
+ llvm::OwningPtr<CodeGenerator> Gen;
+
+ llvm::Module *TheModule;
+ llvm::TargetData *TheTargetData;
+
+ mutable llvm::ModuleProvider *ModuleProvider;
+ mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+ FunctionPassManager *getCodeGenPasses() const;
+ PassManager *getPerModulePasses() const;
+ FunctionPassManager *getPerFunctionPasses() const;
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM
+ /// IR.
+ ///
+ /// \return True on success. On failure \arg Error will be set to
+ /// a user readable error message.
+ bool AddEmitPasses(std::string &Error);
+
+ void EmitAssembly();
+
+ public:
+ BackendConsumer(BackendAction action, Diagnostic &Diags,
+ const LangOptions &langopts, const CompileOptions &compopts,
+ const std::string &infile, llvm::raw_ostream* OS) :
+ Action(action),
+ CompileOpts(compopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ CodeGenerationTime("Code Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts)),
+ TheModule(0), TheTargetData(0), ModuleProvider(0),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {
+
+ // Enable -time-passes if -ftime-report is enabled.
+ llvm::TimePassesIsEnabled = CompileOpts.TimePasses;
+ }
+
+ ~BackendConsumer() {
+ delete TheTargetData;
+ delete ModuleProvider;
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule = Gen->GetModule();
+ ModuleProvider = new ExistingModuleProvider(TheModule);
+ TheTargetData = new llvm::TargetData(Ctx.Target.getTargetDescription());
+
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (CompileOpts.TimePasses)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // EmitAssembly times and registers crash info itself.
+ EmitAssembly();
+
+ // Force a flush here in case we never get released.
+ if (AsmOutStream)
+ AsmOutStream->flush();
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+ };
+}
+
+FunctionPassManager *BackendConsumer::getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new FunctionPassManager(ModuleProvider);
+ CodeGenPasses->add(new TargetData(*TheTargetData));
+ }
+
+ return CodeGenPasses;
+}
+
+PassManager *BackendConsumer::getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(*TheTargetData));
+ }
+
+ return PerModulePasses;
+}
+
+FunctionPassManager *BackendConsumer::getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(ModuleProvider);
+ PerFunctionPasses->add(new TargetData(*TheTargetData));
+ }
+
+ return PerFunctionPasses;
+}
+
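+// A summary of the dispatch below (descriptive comment, not a spec):
+// Backend_EmitNothing adds no output pass at all, Backend_EmitBC adds the
+// bitcode writer and Backend_EmitLL the IR printer to the per-module pass
+// manager, while any other action builds a TargetMachine and asks it to
+// schedule assembly emission into the codegen FunctionPassManager.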
+bool BackendConsumer::AddEmitPasses(std::string &Error) {
+ if (Action == Backend_EmitNothing)
+ return true;
+
+ if (Action == Backend_EmitBC) {
+ getPerModulePasses()->add(createBitcodeWriterPass(*AsmOutStream));
+ } else if (Action == Backend_EmitLL) {
+ getPerModulePasses()->add(createPrintModulePass(AsmOutStream));
+ } else {
+ bool Fast = CompileOpts.OptimizationLevel == 0;
+
+ // Create the TargetMachine for generating code.
+ const TargetMachineRegistry::entry *TME =
+ TargetMachineRegistry::getClosestStaticTargetForModule(*TheModule, Error);
+ if (!TME) {
+ Error = std::string("Unable to get target machine: ") + Error;
+ return false;
+ }
+
+ std::string FeaturesStr;
+ if (CompileOpts.CPU.size() || CompileOpts.Features.size()) {
+ SubtargetFeatures Features;
+ Features.setCPU(CompileOpts.CPU);
+ for (std::vector<std::string>::iterator
+ it = CompileOpts.Features.begin(),
+ ie = CompileOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+ TargetMachine *TM = TME->CtorFn(*TheModule, FeaturesStr);
+
+ // Set register scheduler & allocation policy.
+ RegisterScheduler::setDefault(createDefaultScheduler);
+ RegisterRegAlloc::setDefault(Fast ? createLocalRegisterAllocator :
+ createLinearScanRegisterAllocator);
+
+ // From llvm-gcc:
+ // If there are passes we have to run on the entire module, we do codegen
+ // as a separate "pass" after that happens.
+ // FIXME: This is disabled right now until bugs can be worked out. Reenable
+ // this for fast -O0 compiles!
+ FunctionPassManager *PM = getCodeGenPasses();
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (CompileOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ // Normal mode, emit a .s file by running the code generator.
+ // Note that this also adds code-generator-level optimization passes.
+ switch (TM->addPassesToEmitFile(*PM, *AsmOutStream,
+ TargetMachine::AssemblyFile, OptLevel)) {
+ default:
+ case FileModel::Error:
+ Error = "Unable to interface with target machine!\n";
+ return false;
+ case FileModel::AsmFile:
+ break;
+ }
+
+ if (TM->addPassesToEmitFileFinish(*CodeGenPasses, (MachineCodeEmitter *)0,
+ OptLevel)) {
+ Error = "Unable to interface with target machine!\n";
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void BackendConsumer::CreatePasses() {
+ // At -O0, if checking is disabled, we don't even have per-function passes.
+ if (CompileOpts.VerifyModule)
+ getPerFunctionPasses()->add(createVerifierPass());
+
+ if (CompileOpts.OptimizationLevel > 0) {
+ FunctionPassManager *PM = getPerFunctionPasses();
+ PM->add(createCFGSimplificationPass());
+ if (CompileOpts.OptimizationLevel == 1)
+ PM->add(createPromoteMemoryToRegisterPass());
+ else
+ PM->add(createScalarReplAggregatesPass());
+ PM->add(createInstructionCombiningPass());
+ }
+
+ // For now we always create per module passes.
+ PassManager *PM = getPerModulePasses();
+ if (CompileOpts.OptimizationLevel > 0) {
+ if (CompileOpts.UnitAtATime)
+ PM->add(createRaiseAllocationsPass()); // call %malloc -> malloc inst
+ PM->add(createCFGSimplificationPass()); // Clean up disgusting code
+ PM->add(createPromoteMemoryToRegisterPass()); // Kill useless allocas
+ if (CompileOpts.UnitAtATime) {
+ PM->add(createGlobalOptimizerPass()); // Optimize out global vars
+ PM->add(createGlobalDCEPass()); // Remove unused fns and globs
+ PM->add(createIPConstantPropagationPass()); // IP Constant Propagation
+ PM->add(createDeadArgEliminationPass()); // Dead argument elimination
+ }
+ PM->add(createInstructionCombiningPass()); // Clean up after IPCP & DAE
+ PM->add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
+ if (CompileOpts.UnitAtATime) {
+ PM->add(createPruneEHPass()); // Remove dead EH info
+ PM->add(createFunctionAttrsPass()); // Set readonly/readnone attrs
+ }
+ if (CompileOpts.InlineFunctions)
+ PM->add(createFunctionInliningPass()); // Inline small functions
+ else
+ PM->add(createAlwaysInlinerPass()); // Respect always_inline
+ if (CompileOpts.OptimizationLevel > 2)
+ PM->add(createArgumentPromotionPass()); // Scalarize uninlined fn args
+ if (CompileOpts.SimplifyLibCalls)
+ PM->add(createSimplifyLibCallsPass()); // Library Call Optimizations
+ PM->add(createInstructionCombiningPass()); // Cleanup for scalarrepl.
+ PM->add(createJumpThreadingPass()); // Thread jumps.
+ PM->add(createCFGSimplificationPass()); // Merge & remove BBs
+ PM->add(createScalarReplAggregatesPass()); // Break up aggregate allocas
+ PM->add(createInstructionCombiningPass()); // Combine silly seq's
+ PM->add(createCondPropagationPass()); // Propagate conditionals
+ PM->add(createTailCallEliminationPass()); // Eliminate tail calls
+ PM->add(createCFGSimplificationPass()); // Merge & remove BBs
+ PM->add(createReassociatePass()); // Reassociate expressions
+ PM->add(createLoopRotatePass()); // Rotate Loop
+ PM->add(createLICMPass()); // Hoist loop invariants
+ PM->add(createLoopUnswitchPass(CompileOpts.OptimizeSize ? true : false));
+// PM->add(createLoopIndexSplitPass()); // Split loop index
+ PM->add(createInstructionCombiningPass());
+ PM->add(createIndVarSimplifyPass()); // Canonicalize indvars
+ PM->add(createLoopDeletionPass()); // Delete dead loops
+ if (CompileOpts.UnrollLoops)
+ PM->add(createLoopUnrollPass()); // Unroll small loops
+ PM->add(createInstructionCombiningPass()); // Clean up after the unroller
+ PM->add(createGVNPass()); // Remove redundancies
+ PM->add(createMemCpyOptPass()); // Remove memcpy / form memset
+ PM->add(createSCCPPass()); // Constant prop with SCCP
+
+ // Run instcombine after redundancy elimination to exploit opportunities
+ // opened up by them.
+ PM->add(createInstructionCombiningPass());
+ PM->add(createCondPropagationPass()); // Propagate conditionals
+ PM->add(createDeadStoreEliminationPass()); // Delete dead stores
+ PM->add(createAggressiveDCEPass()); // Delete dead instructions
+ PM->add(createCFGSimplificationPass()); // Merge & remove BBs
+
+ if (CompileOpts.UnitAtATime) {
+ PM->add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
+ PM->add(createDeadTypeEliminationPass()); // Eliminate dead types
+ }
+
+ if (CompileOpts.OptimizationLevel > 1 && CompileOpts.UnitAtATime)
+ PM->add(createConstantMergePass()); // Merge dup global constants
+ } else {
+ PM->add(createAlwaysInlinerPass());
+ }
+}
+
+/// EmitAssembly - Handle interaction with LLVM backend to generate
+/// actual machine code.
+void BackendConsumer::EmitAssembly() {
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule || !TheTargetData)
+ return;
+
+
+ TimeRegion Region(CompileOpts.TimePasses ? &CodeGenerationTime : 0);
+
+ // Make sure IR generation is happy with the module. The module itself is
+ // released by the module provider.
+ Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures, do not
+ // double free.
+ ModuleProvider->releaseModule();
+ TheModule = 0;
+ return;
+ }
+
+ assert(TheModule == M && "Unexpected module change during IR generation");
+
+ CreatePasses();
+
+ std::string Error;
+ if (!AddEmitPasses(Error)) {
+ // FIXME: Don't fail this way.
+ llvm::cerr << "ERROR: " << Error << "\n";
+ ::exit(1);
+ }
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*M);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+}
+
+ASTConsumer *clang::CreateBackendConsumer(BackendAction Action,
+ Diagnostic &Diags,
+ const LangOptions &LangOpts,
+ const CompileOptions &CompileOpts,
+ const std::string& InFile,
+ llvm::raw_ostream* OS) {
+ return new BackendConsumer(Action, Diags, LangOpts, CompileOpts, InFile, OS);
+}
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
new file mode 100644
index 0000000..649f9da
--- /dev/null
+++ b/lib/Frontend/CMakeLists.txt
@@ -0,0 +1,35 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangFrontend
+ AnalysisConsumer.cpp
+ ASTConsumers.cpp
+ Backend.cpp
+ CacheTokens.cpp
+ DependencyFile.cpp
+ DiagChecker.cpp
+ DocumentXML.cpp
+ FixItRewriter.cpp
+ GeneratePCH.cpp
+ HTMLDiagnostics.cpp
+ HTMLPrint.cpp
+ InitHeaderSearch.cpp
+ InitPreprocessor.cpp
+ ManagerRegistry.cpp
+ PCHReader.cpp
+ PCHReaderDecl.cpp
+ PCHReaderStmt.cpp
+ PCHWriter.cpp
+ PCHWriterDecl.cpp
+ PCHWriterStmt.cpp
+ PlistDiagnostics.cpp
+ PrintParserCallbacks.cpp
+ PrintPreprocessedOutput.cpp
+ RewriteBlocks.cpp
+ RewriteMacros.cpp
+ RewriteObjC.cpp
+ RewriteTest.cpp
+ StmtXML.cpp
+ TextDiagnosticBuffer.cpp
+ TextDiagnosticPrinter.cpp
+ Warnings.cpp
+ )
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
new file mode 100644
index 0000000..0065828
--- /dev/null
+++ b/lib/Frontend/CacheTokens.cpp
@@ -0,0 +1,658 @@
+//===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a possible implementation of PTH support for Clang that is
+// based on caching lexed tokens and identifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Streams.h"
+
+// FIXME: put this somewhere else?
+#ifndef S_ISDIR
+#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
+#endif
+
+using namespace clang;
+using namespace clang::io;
+
+//===----------------------------------------------------------------------===//
+// PTH-specific stuff.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN PTHEntry {
+ Offset TokenData, PPCondData;
+
+public:
+ PTHEntry() {}
+
+ PTHEntry(Offset td, Offset ppcd)
+ : TokenData(td), PPCondData(ppcd) {}
+
+ Offset getTokenOffset() const { return TokenData; }
+ Offset getPPCondTableOffset() const { return PPCondData; }
+};
+
+
+class VISIBILITY_HIDDEN PTHEntryKeyVariant {
+ union { const FileEntry* FE; const char* Path; };
+ enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
+ struct stat *StatBuf;
+public:
+ PTHEntryKeyVariant(const FileEntry *fe)
+ : FE(fe), Kind(IsFE), StatBuf(0) {}
+
+ PTHEntryKeyVariant(struct stat* statbuf, const char* path)
+ : Path(path), Kind(IsDE), StatBuf(new struct stat(*statbuf)) {}
+
+ PTHEntryKeyVariant(const char* path)
+ : Path(path), Kind(IsNoExist), StatBuf(0) {}
+
+ bool isFile() const { return Kind == IsFE; }
+
+ const char* getCString() const {
+ return Kind == IsFE ? FE->getName() : Path;
+ }
+
+ unsigned getKind() const { return (unsigned) Kind; }
+
+ void EmitData(llvm::raw_ostream& Out) {
+ switch (Kind) {
+ case IsFE:
+ // Emit stat information.
+ ::Emit32(Out, FE->getInode());
+ ::Emit32(Out, FE->getDevice());
+ ::Emit16(Out, FE->getFileMode());
+ ::Emit64(Out, FE->getModificationTime());
+ ::Emit64(Out, FE->getSize());
+ break;
+ case IsDE:
+ // Emit stat information.
+ ::Emit32(Out, (uint32_t) StatBuf->st_ino);
+ ::Emit32(Out, (uint32_t) StatBuf->st_dev);
+ ::Emit16(Out, (uint16_t) StatBuf->st_mode);
+ ::Emit64(Out, (uint64_t) StatBuf->st_mtime);
+ ::Emit64(Out, (uint64_t) StatBuf->st_size);
+ delete StatBuf;
+ break;
+ default:
+ break;
+ }
+ }
+
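+ // The fixed-width stat record written by EmitData() above is
+ // inode(4) + device(4) + mode(2) + mtime(8) + size(8) = 26 bytes;
+ // nonexistent entries carry no stat data at all.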
+ unsigned getRepresentationLength() const {
+ return Kind == IsNoExist ? 0 : 4 + 4 + 2 + 8 + 8;
+ }
+};
+
+class VISIBILITY_HIDDEN FileEntryPTHEntryInfo {
+public:
+ typedef PTHEntryKeyVariant key_type;
+ typedef key_type key_type_ref;
+
+ typedef PTHEntry data_type;
+ typedef const PTHEntry& data_type_ref;
+
+ static unsigned ComputeHash(PTHEntryKeyVariant V) {
+ return BernsteinHash(V.getCString());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E) {
+
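+ // Key length n below: a 1-byte entry kind plus the nul-terminated path
+ // string. Data length m: the stat record from EmitData(), plus two 32-bit
+ // PTH offsets (token data and #if table) for entries that are real files.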
+ unsigned n = strlen(V.getCString()) + 1 + 1;
+ ::Emit16(Out, n);
+
+ unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
+ ::Emit8(Out, m);
+
+ return std::make_pair(n, m);
+ }
+
+ static void EmitKey(llvm::raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
+ // Emit the entry kind.
+ ::Emit8(Out, (unsigned) V.getKind());
+ // Emit the string.
+ Out.write(V.getCString(), n - 1);
+ }
+
+ static void EmitData(llvm::raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E, unsigned) {
+
+
+ // For file entries emit the offsets into the PTH file for token data
+ // and the preprocessor blocks table.
+ if (V.isFile()) {
+ ::Emit32(Out, E.getTokenOffset());
+ ::Emit32(Out, E.getPPCondTableOffset());
+ }
+
+ // Emit any other data associated with the key (i.e., stat information).
+ V.EmitData(Out);
+ }
+};
+
+class OffsetOpt {
+ bool valid;
+ Offset off;
+public:
+ OffsetOpt() : valid(false) {}
+ bool hasOffset() const { return valid; }
+ Offset getOffset() const { assert(valid); return off; }
+ void setOffset(Offset o) { off = o; valid = true; }
+};
+} // end anonymous namespace
+
+typedef OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
+typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
+typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
+
+namespace {
+class VISIBILITY_HIDDEN PTHWriter {
+ IDMap IM;
+ llvm::raw_fd_ostream& Out;
+ Preprocessor& PP;
+ uint32_t idcount;
+ PTHMap PM;
+ CachedStrsTy CachedStrs;
+ Offset CurStrOffset;
+ std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
+
+ /// Get the persistent id for the given IdentifierInfo*.
+ uint32_t ResolveID(const IdentifierInfo* II);
+
+ /// Emit a token to the PTH file.
+ void EmitToken(const Token& T);
+
+ void Emit8(uint32_t V) {
+ Out << (unsigned char)(V);
+ }
+
+ void Emit16(uint32_t V) { ::Emit16(Out, V); }
+
+ void Emit24(uint32_t V) {
+ Out << (unsigned char)(V);
+ Out << (unsigned char)(V >> 8);
+ Out << (unsigned char)(V >> 16);
+ assert((V >> 24) == 0);
+ }
+
+ void Emit32(uint32_t V) { ::Emit32(Out, V); }
+
+ void EmitBuf(const char *Ptr, unsigned NumBytes) {
+ Out.write(Ptr, NumBytes);
+ }
+
+ /// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+ /// a hashtable mapping from identifier strings to persistent IDs.
+ /// The second is a straight table mapping from persistent IDs to string data
+ /// (the keys of the first table).
+ std::pair<Offset, Offset> EmitIdentifierTable();
+
+ /// EmitFileTable - Emit a table mapping from file name strings to PTH
+ /// token data.
+ Offset EmitFileTable() { return PM.Emit(Out); }
+
+ PTHEntry LexTokens(Lexer& L);
+ Offset EmitCachedSpellings();
+
+public:
+ PTHWriter(llvm::raw_fd_ostream& out, Preprocessor& pp)
+ : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
+
+ PTHMap &getPM() { return PM; }
+ void GeneratePTH(const std::string *MainFile = 0);
+};
+} // end anonymous namespace
+
+uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
+ // Null IdentifierInfo's map to the persistent ID 0.
+ if (!II)
+ return 0;
+
+ IDMap::iterator I = IM.find(II);
+ if (I != IM.end())
+ return I->second; // We've already added 1.
+
+ IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
+ return idcount;
+}
+
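+// A sketch of the record written below: each token becomes three 32-bit
+// words -- (kind | flags << 8 | length << 16), then either the persistent
+// identifier ID or the offset of the cached literal spelling, and finally
+// the token's offset within its source file (12 bytes per token).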
+void PTHWriter::EmitToken(const Token& T) {
+ // Emit the token kind, flags, and length.
+ Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
+ (((uint32_t) T.getLength()) << 16));
+
+ if (!T.isLiteral()) {
+ Emit32(ResolveID(T.getIdentifierInfo()));
+ } else {
+ // We cache *un-cleaned* spellings. This gives us 100% fidelity with the
+ // source code.
+ const char* s = T.getLiteralData();
+ unsigned len = T.getLength();
+
+ // Get the string entry.
+ llvm::StringMapEntry<OffsetOpt> *E = &CachedStrs.GetOrCreateValue(s, s+len);
+
+ // If this is a new string entry, bump the PTH offset.
+ if (!E->getValue().hasOffset()) {
+ E->getValue().setOffset(CurStrOffset);
+ StrEntries.push_back(E);
+ CurStrOffset += len + 1;
+ }
+
+ // Emit the relative offset into the PTH file for the spelling string.
+ Emit32(E->getValue().getOffset());
+ }
+
+ // Emit the offset into the original source file of this token so that we
+ // can reconstruct its SourceLocation.
+ Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
+}
+
+PTHEntry PTHWriter::LexTokens(Lexer& L) {
+ // Pad with 0's so that we emit tokens at a 4-byte alignment.
+ // This speeds up reading them back in.
+ Pad(Out, 4);
+ Offset off = (Offset) Out.tell();
+
+ // Keep track of matching '#if' ... '#endif'.
+ typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
+ PPCondTable PPCond;
+ std::vector<unsigned> PPStartCond;
+ bool ParsingPreprocessorDirective = false;
+ Token Tok;
+
+ do {
+ L.LexFromRawLexer(Tok);
+ NextToken:
+
+ if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
+ ParsingPreprocessorDirective) {
+ // Insert an eom token into the token cache. It has the same
+ // position as the next token that is not on the same line as the
+ // preprocessor directive. Observe that we continue processing
+ // 'Tok' when we exit this branch.
+ Token Tmp = Tok;
+ Tmp.setKind(tok::eom);
+ Tmp.clearFlag(Token::StartOfLine);
+ Tmp.setIdentifierInfo(0);
+ EmitToken(Tmp);
+ ParsingPreprocessorDirective = false;
+ }
+
+ if (Tok.is(tok::identifier)) {
+ Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
+ EmitToken(Tok);
+ continue;
+ }
+
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
+ // Special processing for #include. Store the '#' token and lex
+ // the next token.
+ assert(!ParsingPreprocessorDirective);
+ Offset HashOff = (Offset) Out.tell();
+ EmitToken(Tok);
+
+ // Get the next token.
+ L.LexFromRawLexer(Tok);
+
+ // If we see the start of a line, then we had a null directive "#".
+ if (Tok.isAtStartOfLine())
+ goto NextToken;
+
+ // Did we see 'include'/'import'/'include_next'?
+ if (Tok.isNot(tok::identifier)) {
+ EmitToken(Tok);
+ continue;
+ }
+
+ IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
+ Tok.setIdentifierInfo(II);
+ tok::PPKeywordKind K = II->getPPKeywordID();
+
+ ParsingPreprocessorDirective = true;
+
+ switch (K) {
+ case tok::pp_not_keyword:
+ // Invalid directives "#foo" can occur in #if 0 blocks etc.; just pass
+ // them through.
+ default:
+ break;
+
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next: {
+ // Save the 'include' token.
+ EmitToken(Tok);
+ // Lex the next token as an include string.
+ L.setParsingPreprocessorDirective(true);
+ L.LexIncludeFilename(Tok);
+ L.setParsingPreprocessorDirective(false);
+ assert(!Tok.isAtStartOfLine());
+ if (Tok.is(tok::identifier))
+ Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
+
+ break;
+ }
+ case tok::pp_if:
+ case tok::pp_ifdef:
+ case tok::pp_ifndef: {
+ // Add an entry for '#if' and friends. We initially set the target
+ // index to 0. This will get backpatched when we hit #endif.
+ PPStartCond.push_back(PPCond.size());
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ break;
+ }
+ case tok::pp_endif: {
+ // Add an entry for '#endif'. We set the target table index to itself
+ // for now; it is translated to zero when emitting to the PTH file, since
+ // here 0 is reserved for uninitialized indices (easier to debug).
+ unsigned index = PPCond.size();
+ // Backpatch the opening '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Add the new entry to PPCond.
+ PPCond.push_back(std::make_pair(HashOff, index));
+ EmitToken(Tok);
+
+ // Some files have gibberish on the same line as '#endif'.
+ // Discard these tokens.
+ do
+ L.LexFromRawLexer(Tok);
+ while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
+ // We have the next token in hand.
+ // Don't immediately lex the next one.
+ goto NextToken;
+ }
+ case tok::pp_elif:
+ case tok::pp_else: {
+ // Add an entry for #elif or #else.
+ // This serves as both a closing and opening of a conditional block.
+ // This means that its entry will get backpatched later.
+ unsigned index = PPCond.size();
+ // Backpatch the previous '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Now add '#elif' as a new block opening.
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ PPStartCond.push_back(index);
+ break;
+ }
+ }
+ }
+
+ EmitToken(Tok);
+ }
+ while (Tok.isNot(tok::eof));
+
+ assert(PPStartCond.empty() && "Error: imbalanced preprocessor conditionals.");
+
+ // Next write out PPCond.
+ Offset PPCondOff = (Offset) Out.tell();
+
+ // Write out the size of PPCond so that clients can identify empty tables.
+ Emit32(PPCond.size());
+
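+ // Each record below is two 32-bit words: the offset of the directive's '#'
+ // token relative to the start of this file's token data, and the table
+ // index of the matching conditional (rewritten to 0 for '#endif' entries).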
+ for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
+ Emit32(PPCond[i].first - off);
+ uint32_t x = PPCond[i].second;
+ assert(x != 0 && "PPCond entry not backpatched.");
+ // Emit zero for #endifs. This allows us to do checking when
+ // we read the PTH file back in.
+ Emit32(x == i ? 0 : x);
+ }
+
+ return PTHEntry(off, PPCondOff);
+}
+
+Offset PTHWriter::EmitCachedSpellings() {
+ // Write each cached string to the PTH file.
+ Offset SpellingsOff = Out.tell();
+
+ for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
+ I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
+ EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
+
+ return SpellingsOff;
+}
+
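+// Overall layout assembled below (a summary of this function, not a separate
+// format spec): the "cfe-pth" magic and version, four 32-bit prologue slots
+// backpatched at the end with the identifier-table offsets, the file-table
+// offset and the spellings offset, the main-file name, then per-file token
+// data followed by the identifier tables, cached spellings and file table.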
+void PTHWriter::GeneratePTH(const std::string *MainFile) {
+ // Generate the prologue.
+ Out << "cfe-pth";
+ Emit32(PTHManager::Version);
+
+ // Leave 4 words for the prologue.
+ Offset PrologueOffset = Out.tell();
+ for (unsigned i = 0; i < 4; ++i)
+ Emit32(0);
+
+ // Write the name of the MainFile.
+ if (MainFile && !MainFile->empty()) {
+ Emit16(MainFile->length());
+ EmitBuf(MainFile->data(), MainFile->length());
+ } else {
+ // String with 0 bytes.
+ Emit16(0);
+ }
+ Emit8(0);
+
+ // Iterate over all the files in SourceManager. Create a lexer
+ // for each file and cache the tokens.
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LOpts = PP.getLangOptions();
+
+ for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
+ E = SM.fileinfo_end(); I != E; ++I) {
+ const SrcMgr::ContentCache &C = *I->second;
+ const FileEntry *FE = C.Entry;
+
+ // FIXME: Handle files with non-absolute paths.
+ llvm::sys::Path P(FE->getName());
+ if (!P.isAbsolute())
+ continue;
+
+ const llvm::MemoryBuffer *B = C.getBuffer();
+ if (!B) continue;
+
+ FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
+ Lexer L(FID, SM, LOpts);
+ PM.insert(FE, LexTokens(L));
+ }
+
+ // Write out the identifier table.
+ const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
+
+ // Write out the cached strings table.
+ Offset SpellingOff = EmitCachedSpellings();
+
+ // Write out the file table.
+ Offset FileTableOff = EmitFileTable();
+
+ // Finally, write the prologue.
+ Out.seek(PrologueOffset);
+ Emit32(IdTableOff.first);
+ Emit32(IdTableOff.second);
+ Emit32(FileTableOff);
+ Emit32(SpellingOff);
+}
+
+namespace {
+/// StatListener - A simple "interpose" object used to monitor stat calls
+/// invoked by FileManager while processing the original sources used
+/// as input to PTH generation. StatListener populates the PTHWriter's
+/// file map with stat information for directories as well as negative stats.
+/// Stat information for files is populated elsewhere.
+class StatListener : public StatSysCallCache {
+ PTHMap &PM;
+public:
+ StatListener(PTHMap &pm) : PM(pm) {}
+ ~StatListener() {}
+
+ int stat(const char *path, struct stat *buf) {
+ int result = ::stat(path, buf);
+
+ if (result != 0) // Failed 'stat'.
+ PM.insert(path, PTHEntry());
+ else if (S_ISDIR(buf->st_mode)) {
+ // Only cache directories with absolute paths.
+ if (!llvm::sys::Path(path).isAbsolute())
+ return result;
+
+ PM.insert(PTHEntryKeyVariant(buf, path), PTHEntry());
+ }
+
+ return result;
+ }
+};
+} // end anonymous namespace
+
+
+void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) {
+ // Get the name of the main file.
+ const SourceManager &SrcMgr = PP.getSourceManager();
+ const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
+ llvm::sys::Path MainFilePath(MainFile->getName());
+ std::string MainFileName;
+
+ if (!MainFilePath.isAbsolute()) {
+ llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+ P.appendComponent(MainFilePath.toString());
+ MainFileName = P.toString();
+ } else {
+ MainFileName = MainFilePath.toString();
+ }
+
+ // Create the PTHWriter.
+ PTHWriter PW(*OS, PP);
+
+ // Install the 'stat' system call listener in the FileManager.
+ PP.getFileManager().setStatCache(new StatListener(PW.getPM()));
+
+ // Lex through the entire file. This will populate SourceManager with
+ // all of the header information.
+ Token Tok;
+ PP.EnterMainSourceFile();
+ do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
+
+ // Generate the PTH file.
+ PP.getFileManager().setStatCache(0);
+ PW.GeneratePTH(&MainFileName);
+}
+
+//===----------------------------------------------------------------------===//
+
+class PTHIdKey {
+public:
+ const IdentifierInfo* II;
+ uint32_t FileOffset;
+};
+
+namespace {
+class VISIBILITY_HIDDEN PTHIdentifierTableTrait {
+public:
+ typedef PTHIdKey* key_type;
+ typedef key_type key_type_ref;
+
+ typedef uint32_t data_type;
+ typedef data_type data_type_ref;
+
+ static unsigned ComputeHash(PTHIdKey* key) {
+ return BernsteinHash(key->II->getName());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, const PTHIdKey* key, uint32_t) {
+ unsigned n = strlen(key->II->getName()) + 1;
+ ::Emit16(Out, n);
+ return std::make_pair(n, sizeof(uint32_t));
+ }
+
+ static void EmitKey(llvm::raw_ostream& Out, PTHIdKey* key, unsigned n) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ key->FileOffset = Out.tell();
+ Out.write(key->II->getName(), n);
+ }
+
+ static void EmitData(llvm::raw_ostream& Out, PTHIdKey*, uint32_t pID,
+ unsigned) {
+ ::Emit32(Out, pID);
+ }
+};
+} // end anonymous namespace
+
+/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+/// a hashtable mapping from identifier strings to persistent IDs. The second
+/// is a straight table mapping from persistent IDs to string data (the
+/// keys of the first table).
+///
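+/// Layout as emitted below: the first table is an on-disk chained hash table
+/// keyed by the identifier string with a 32-bit persistent ID as its data;
+/// the second is a 32-bit count followed by one 32-bit file offset per
+/// persistent ID, pointing back at the key strings of the first table.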
+std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
+ // Build two maps:
+ // (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
+ // (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
+
+ // Note that we use 'calloc', so all the bytes are 0.
+ PTHIdKey *IIDMap = (PTHIdKey*)calloc(idcount, sizeof(PTHIdKey));
+
+ // Create the hashtable.
+ OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
+
+ // Generate mapping from persistent IDs -> IdentifierInfo*.
+ for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
+ // Decrement by 1 because we are using a vector for the lookup and
+ // 0 is reserved for NULL.
+ assert(I->second > 0);
+ assert(I->second-1 < idcount);
+ unsigned idx = I->second-1;
+
+ // Store the mapping from persistent ID to IdentifierInfo*
+ IIDMap[idx].II = I->first;
+
+ // Store the reverse mapping in a hashtable.
+ IIOffMap.insert(&IIDMap[idx], I->second);
+ }
+
+ // Write out the inverse map first. This causes the PTHIdKey entries to
+ // record PTH file offsets for the string data. This is used to write
+ // the second table.
+ Offset StringTableOffset = IIOffMap.Emit(Out);
+
+ // Now emit the table mapping from persistent IDs to PTH file offsets.
+ Offset IDOff = Out.tell();
+ Emit32(idcount); // Emit the number of identifiers.
+ for (unsigned i = 0 ; i < idcount; ++i)
+ Emit32(IIDMap[i].FileOffset);
+
+ // Finally, release the inverse map.
+ free(IIDMap);
+
+ return std::make_pair(IDOff, StringTableOffset);
+}
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
new file mode 100644
index 0000000..c8a654c
--- /dev/null
+++ b/lib/Frontend/DependencyFile.cpp
@@ -0,0 +1,169 @@
+//===--- DependencyFile.cpp - Generate dependency file --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates dependency files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+using namespace clang;
+
+namespace {
+class VISIBILITY_HIDDEN DependencyFileCallback : public PPCallbacks {
+ std::vector<std::string> Files;
+ llvm::StringSet<> FilesSet;
+ const Preprocessor *PP;
+ std::vector<std::string> Targets;
+ llvm::raw_ostream *OS;
+ bool IncludeSystemHeaders;
+ bool PhonyTarget;
+private:
+ bool FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType);
+ void OutputDependencyFile();
+
+public:
+ DependencyFileCallback(const Preprocessor *_PP,
+ llvm::raw_ostream *_OS,
+ const std::vector<std::string> &_Targets,
+ bool _IncludeSystemHeaders,
+ bool _PhonyTarget)
+ : PP(_PP), Targets(_Targets), OS(_OS),
+ IncludeSystemHeaders(_IncludeSystemHeaders), PhonyTarget(_PhonyTarget) {}
+
+ ~DependencyFileCallback() {
+ OutputDependencyFile();
+ OS->flush();
+ delete OS;
+ }
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType);
+};
+}
+
+
+
+void clang::AttachDependencyFileGen(Preprocessor *PP, llvm::raw_ostream *OS,
+ std::vector<std::string> &Targets,
+ bool IncludeSystemHeaders,
+ bool PhonyTarget) {
+ assert(!Targets.empty() && "Target required for dependency generation");
+
+ DependencyFileCallback *PPDep =
+ new DependencyFileCallback(PP, OS, Targets, IncludeSystemHeaders,
+ PhonyTarget);
+ PP->setPPCallbacks(PPDep);
+}
+
+/// FileMatchesDepCriteria - Determine whether the given Filename should be
+/// considered as a dependency.
+bool DependencyFileCallback::FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType) {
+ if (strcmp("<built-in>", Filename) == 0)
+ return false;
+
+ if (IncludeSystemHeaders)
+ return true;
+
+ return FileType == SrcMgr::C_User;
+}
+
+void DependencyFileCallback::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType) {
+ if (Reason != PPCallbacks::EnterFile)
+ return;
+
+ // Dependency generation really does want to go all the way to the
+ // file entry for a source location to find out what is depended on.
+ // We do not want #line markers to affect dependency generation!
+ SourceManager &SM = PP->getSourceManager();
+
+ const FileEntry *FE =
+ SM.getFileEntryForID(SM.getFileID(SM.getInstantiationLoc(Loc)));
+ if (FE == 0) return;
+
+ const char *Filename = FE->getName();
+ if (!FileMatchesDepCriteria(Filename, FileType))
+ return;
+
+ // Remove leading "./"
+ if (Filename[0] == '.' && Filename[1] == '/')
+ Filename = &Filename[2];
+
+ if (FilesSet.insert(Filename))
+ Files.push_back(Filename);
+}
+
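+// For illustration (hypothetical file names), the output written below is a
+// standard make dependency file:
+//
+//   foo.o: foo.c foo.h /usr/include/stdio.h
+//
+//   foo.h:
+//
+//   /usr/include/stdio.h:
+//
+// where the per-header phony targets are only emitted when PhonyTarget is
+// set (the gcc -MP style of output).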
+void DependencyFileCallback::OutputDependencyFile() {
+ // Write out the dependency targets, trying to avoid overly long
+ // lines when possible. We try our best to emit exactly the same
+ // dependency file as GCC (4.2), assuming the included files are the
+ // same.
+ const unsigned MaxColumns = 75;
+ unsigned Columns = 0;
+
+ for (std::vector<std::string>::iterator
+ I = Targets.begin(), E = Targets.end(); I != E; ++I) {
+ unsigned N = I->length();
+ if (Columns == 0) {
+ Columns += N;
+ *OS << *I;
+ } else if (Columns + N + 2 > MaxColumns) {
+ Columns = N + 2;
+ *OS << " \\\n " << *I;
+ } else {
+ Columns += N + 1;
+ *OS << ' ' << *I;
+ }
+ }
+
+ *OS << ':';
+ Columns += 1;
+
+ // Now add each dependency in the order it was seen, but avoiding
+ // duplicates.
+ for (std::vector<std::string>::iterator I = Files.begin(),
+ E = Files.end(); I != E; ++I) {
+ // Start a new line if this would exceed the column limit. Make
+ // sure to leave space for a trailing " \" in case we need to
+ // break the line on the next iteration.
+ unsigned N = I->length();
+ if (Columns + (N + 1) + 2 > MaxColumns) {
+ *OS << " \\\n ";
+ Columns = 2;
+ }
+ *OS << ' ' << *I;
+ Columns += N + 1;
+ }
+ *OS << '\n';
+
+ // Create phony targets if requested.
+ if (PhonyTarget) {
+ // Skip the first entry; it is always the input file itself.
+ for (std::vector<std::string>::iterator I = Files.begin() + 1,
+ E = Files.end(); I != E; ++I) {
+ *OS << '\n';
+ *OS << *I << ":\n";
+ }
+ }
+}
+
diff --git a/lib/Frontend/DiagChecker.cpp b/lib/Frontend/DiagChecker.cpp
new file mode 100644
index 0000000..c0f5d14
--- /dev/null
+++ b/lib/Frontend/DiagChecker.cpp
@@ -0,0 +1,302 @@
+//===--- DiagChecker.cpp - Diagnostic Checking Functions ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Process the input files and check that the diagnostic messages are expected.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Sema/ParseAST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include <cstdio>
+using namespace clang;
+
+typedef TextDiagnosticBuffer::DiagList DiagList;
+typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
+
+static void EmitError(Preprocessor &PP, SourceLocation Pos, const char *String){
+ unsigned ID = PP.getDiagnostics().getCustomDiagID(Diagnostic::Error, String);
+ PP.Diag(Pos, ID);
+}
+
+
+// USING THE DIAGNOSTIC CHECKER:
+//
+// Indicating that a line expects an error or a warning is simple. Put a comment
+// on the line that has the diagnostic, use "expected-{error,warning}" to tag
+// if it's an expected error or warning, and place the expected text between {{
+// and }} markers. The full text doesn't have to be included, only enough to
+// ensure that the correct diagnostic was emitted.
+//
+// Here's an example:
+//
+// int A = B; // expected-error {{use of undeclared identifier 'B'}}
+//
+// You can place as many diagnostics on one line as you wish. To make the code
+// more readable, you can use slash-newline to separate out the diagnostics.
+//
+// The simple syntax above allows each specification to match exactly one error.
+// You can use the extended syntax to customize this. The extended syntax is
+// "expected-<type> <n> {{diag text}}", where <type> is one of "error",
+// "warning" or "note", and <n> is a positive integer. This allows the
+// diagnostic to appear as many times as specified. Example:
+//
+// void f(); // expected-note 2 {{previous declaration is here}}
+//
+
+/// FindDiagnostics - Go through the comment and see if it indicates expected
+/// diagnostics. If so, then put them in a diagnostic list.
+///
+static void FindDiagnostics(const char *CommentStart, unsigned CommentLen,
+ DiagList &ExpectedDiags,
+ Preprocessor &PP, SourceLocation Pos,
+ const char *ExpectedStr) {
+ const char *CommentEnd = CommentStart+CommentLen;
+ unsigned ExpectedStrLen = strlen(ExpectedStr);
+
+ // Find all expected-foo diagnostics in the string and add them to
+ // ExpectedDiags.
+ while (CommentStart != CommentEnd) {
+ CommentStart = std::find(CommentStart, CommentEnd, 'e');
+ if (unsigned(CommentEnd-CommentStart) < ExpectedStrLen) return;
+
+ // If this isn't expected-foo, ignore it.
+ if (memcmp(CommentStart, ExpectedStr, ExpectedStrLen)) {
+ ++CommentStart;
+ continue;
+ }
+
+ CommentStart += ExpectedStrLen;
+
+ // Skip whitespace.
+ while (CommentStart != CommentEnd &&
+ isspace(CommentStart[0]))
+ ++CommentStart;
+
+ // Default, if we find the '{' now, is 1 time.
+ int Times = 1;
+ int Temp = 0;
+ // In extended syntax, there could be a digit now.
+ while (CommentStart != CommentEnd &&
+ CommentStart[0] >= '0' && CommentStart[0] <= '9') {
+ Temp *= 10;
+ Temp += CommentStart[0] - '0';
+ ++CommentStart;
+ }
+ if (Temp > 0)
+ Times = Temp;
+
+ // Skip whitespace again.
+ while (CommentStart != CommentEnd &&
+ isspace(CommentStart[0]))
+ ++CommentStart;
+
+ // We should have a {{ now.
+ if (CommentEnd-CommentStart < 2 ||
+ CommentStart[0] != '{' || CommentStart[1] != '{') {
+ if (std::find(CommentStart, CommentEnd, '{') != CommentEnd)
+ EmitError(PP, Pos, "bogus characters before '{{' in expected string");
+ else
+ EmitError(PP, Pos, "cannot find start ('{{') of expected string");
+ return;
+ }
+ CommentStart += 2;
+
+ // Find the }}.
+ const char *ExpectedEnd = CommentStart;
+ while (1) {
+ ExpectedEnd = std::find(ExpectedEnd, CommentEnd, '}');
+ if (CommentEnd-ExpectedEnd < 2) {
+ EmitError(PP, Pos, "cannot find end ('}}') of expected string");
+ return;
+ }
+
+ if (ExpectedEnd[1] == '}')
+ break;
+
+ ++ExpectedEnd; // Skip over singular }'s
+ }
+
+ std::string Msg(CommentStart, ExpectedEnd);
+ std::string::size_type FindPos;
+ while ((FindPos = Msg.find("\\n")) != std::string::npos)
+ Msg.replace(FindPos, 2, "\n");
+ // Add it, possibly multiple times.
+ for (int i = 0; i < Times; ++i)
+ ExpectedDiags.push_back(std::make_pair(Pos, Msg));
+
+ CommentStart = ExpectedEnd;
+ }
+}
+
+/// FindExpectedDiags - Lex the main source file to find all of the
+/// expected errors and warnings.
+static void FindExpectedDiags(Preprocessor &PP,
+ DiagList &ExpectedErrors,
+ DiagList &ExpectedWarnings,
+ DiagList &ExpectedNotes) {
+ // Create a raw lexer to pull all the comments out of the main file. We don't
+ // want to look in #include'd headers for expected-error strings.
+ FileID FID = PP.getSourceManager().getMainFileID();
+
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ Lexer RawLex(FID, PP.getSourceManager(), PP.getLangOptions());
+
+ // Return comments as tokens, this is how we find expected diagnostics.
+ RawLex.SetCommentRetentionState(true);
+
+ Token Tok;
+ Tok.setKind(tok::comment);
+ while (Tok.isNot(tok::eof)) {
+ RawLex.Lex(Tok);
+ if (!Tok.is(tok::comment)) continue;
+
+ std::string Comment = PP.getSpelling(Tok);
+ if (Comment.empty()) continue;
+
+
+ // Find all expected errors.
+ FindDiagnostics(&Comment[0], Comment.size(), ExpectedErrors, PP,
+ Tok.getLocation(), "expected-error");
+
+ // Find all expected warnings.
+ FindDiagnostics(&Comment[0], Comment.size(), ExpectedWarnings, PP,
+ Tok.getLocation(), "expected-warning");
+
+ // Find all expected notes.
+ FindDiagnostics(&Comment[0], Comment.size(), ExpectedNotes, PP,
+ Tok.getLocation(), "expected-note");
+ }
+}
+
+/// PrintProblem - This takes a list of diagnostics representing the delta
+/// between expected and seen diagnostics. If there's anything in it, then
+/// something unexpected happened. Print the list out in a nice format and
+/// return "true". If the list is empty, return "false".
+///
+static bool PrintProblem(SourceManager &SourceMgr,
+ const_diag_iterator diag_begin,
+ const_diag_iterator diag_end,
+ const char *Msg) {
+ if (diag_begin == diag_end) return false;
+
+ fprintf(stderr, "%s\n", Msg);
+
+ for (const_diag_iterator I = diag_begin, E = diag_end; I != E; ++I)
+ fprintf(stderr, " Line %d: %s\n",
+ SourceMgr.getInstantiationLineNumber(I->first),
+ I->second.c_str());
+
+ return true;
+}
+
+/// CompareDiagLists - Compare two diagnostic lists and return the difference
+/// between them.
+///
+static bool CompareDiagLists(SourceManager &SourceMgr,
+ const_diag_iterator d1_begin,
+ const_diag_iterator d1_end,
+ const_diag_iterator d2_begin,
+ const_diag_iterator d2_end,
+ const char *MsgLeftOnly,
+ const char *MsgRightOnly) {
+ DiagList LeftOnly;
+ DiagList Left(d1_begin, d1_end);
+ DiagList Right(d2_begin, d2_end);
+
+ for (const_diag_iterator I = Left.begin(), E = Left.end(); I != E; ++I) {
+ unsigned LineNo1 = SourceMgr.getInstantiationLineNumber(I->first);
+ const std::string &Diag1 = I->second;
+
+ DiagList::iterator II, IE;
+ for (II = Right.begin(), IE = Right.end(); II != IE; ++II) {
+ unsigned LineNo2 = SourceMgr.getInstantiationLineNumber(II->first);
+ if (LineNo1 != LineNo2) continue;
+
+ const std::string &Diag2 = II->second;
+ if (Diag2.find(Diag1) != std::string::npos ||
+ Diag1.find(Diag2) != std::string::npos) {
+ break;
+ }
+ }
+ if (II == IE) {
+ // Not found.
+ LeftOnly.push_back(*I);
+ } else {
+ // Found. The same cannot be found twice.
+ Right.erase(II);
+ }
+ }
+ // Now all that's left in Right are those that were not matched.
+
+ return PrintProblem(SourceMgr, LeftOnly.begin(), LeftOnly.end(), MsgLeftOnly)
+ | PrintProblem(SourceMgr, Right.begin(), Right.end(), MsgRightOnly);
+}
+
+/// CheckResults - This compares the expected results to those that
+/// were actually reported. It emits any discrepancies. Return "true" if there
+/// were problems. Return "false" otherwise.
+///
+static bool CheckResults(Preprocessor &PP,
+ const DiagList &ExpectedErrors,
+ const DiagList &ExpectedWarnings,
+ const DiagList &ExpectedNotes) {
+ const DiagnosticClient *DiagClient = PP.getDiagnostics().getClient();
+ assert(DiagClient != 0 &&
+ "DiagChecker requires a valid TextDiagnosticBuffer");
+ const TextDiagnosticBuffer &Diags =
+ static_cast<const TextDiagnosticBuffer&>(*DiagClient);
+ SourceManager &SourceMgr = PP.getSourceManager();
+
+ // We want to capture the delta between what was expected and what was
+ // seen.
+ //
+ // Expected \ Seen - set expected but not seen
+ // Seen \ Expected - set seen but not expected
+ bool HadProblem = false;
+
+ // See if there are error mismatches.
+ HadProblem |= CompareDiagLists(SourceMgr,
+ ExpectedErrors.begin(), ExpectedErrors.end(),
+ Diags.err_begin(), Diags.err_end(),
+ "Errors expected but not seen:",
+ "Errors seen but not expected:");
+
+ // See if there are warning mismatches.
+ HadProblem |= CompareDiagLists(SourceMgr,
+ ExpectedWarnings.begin(),
+ ExpectedWarnings.end(),
+ Diags.warn_begin(), Diags.warn_end(),
+ "Warnings expected but not seen:",
+ "Warnings seen but not expected:");
+
+ // See if there are note mismatches.
+ HadProblem |= CompareDiagLists(SourceMgr,
+ ExpectedNotes.begin(),
+ ExpectedNotes.end(),
+ Diags.note_begin(), Diags.note_end(),
+ "Notes expected but not seen:",
+ "Notes seen but not expected:");
+
+ return HadProblem;
+}
+
+
+/// CheckDiagnostics - Gather the expected diagnostics and check them.
+bool clang::CheckDiagnostics(Preprocessor &PP) {
+ // Gather the set of expected diagnostics.
+ DiagList ExpectedErrors, ExpectedWarnings, ExpectedNotes;
+ FindExpectedDiags(PP, ExpectedErrors, ExpectedWarnings, ExpectedNotes);
+
+ // Check that the expected diagnostics occurred.
+ return CheckResults(PP, ExpectedErrors, ExpectedWarnings, ExpectedNotes);
+}
diff --git a/lib/Frontend/DocumentXML.cpp b/lib/Frontend/DocumentXML.cpp
new file mode 100644
index 0000000..7562d2a
--- /dev/null
+++ b/lib/Frontend/DocumentXML.cpp
@@ -0,0 +1,579 @@
+//===--- DocumentXML.cpp - XML document for ASTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the XML document class, which provides the means to
+// dump out the AST in an XML form that exposes type details and other fields.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/DocumentXML.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+
+namespace clang {
+
+//---------------------------------------------------------
+struct DocumentXML::NodeXML
+{
+ std::string Name;
+ NodeXML* Parent;
+
+ NodeXML(const std::string& name, NodeXML* parent) :
+ Name(name),
+ Parent(parent)
+ {}
+};
+
+//---------------------------------------------------------
+DocumentXML::DocumentXML(const std::string& rootName, llvm::raw_ostream& out) :
+ Root(new NodeXML(rootName, 0)),
+ CurrentNode(Root),
+ Out(out),
+ Ctx(0),
+ CurrentIndent(0),
+ HasCurrentNodeSubNodes(false)
+{
+ Out << "<?xml version=\"1.0\"?>\n<" << rootName;
+}
+
+//---------------------------------------------------------
+DocumentXML::~DocumentXML()
+{
+ assert(CurrentNode == Root && "not completely backtracked");
+ delete Root;
+}
+
+//---------------------------------------------------------
+DocumentXML& DocumentXML::addSubNode(const std::string& name)
+{
+ if (!HasCurrentNodeSubNodes)
+ {
+ Out << ">\n";
+ }
+ CurrentNode = new NodeXML(name, CurrentNode);
+ HasCurrentNodeSubNodes = false;
+ CurrentIndent += 2;
+ Indent();
+ Out << "<" << CurrentNode->Name;
+ return *this;
+}
+
+//---------------------------------------------------------
+void DocumentXML::Indent()
+{
+ for (int i = 0; i < CurrentIndent; ++i)
+ Out << ' ';
+}
+
+//---------------------------------------------------------
+DocumentXML& DocumentXML::toParent()
+{
+ assert(CurrentNode != Root && "too much backtracking");
+
+ if (HasCurrentNodeSubNodes)
+ {
+ Indent();
+ Out << "</" << CurrentNode->Name << ">\n";
+ }
+ else
+ {
+ Out << "/>\n";
+ }
+ NodeXML* NodeToDelete = CurrentNode;
+ CurrentNode = CurrentNode->Parent;
+ delete NodeToDelete;
+ HasCurrentNodeSubNodes = true;
+ CurrentIndent -= 2;
+ return *this;
+}
+
+//---------------------------------------------------------
+namespace {
+
+enum tIdType { ID_NORMAL, ID_FILE, ID_LAST };
+
+unsigned getNewId(tIdType idType)
+{
+ static unsigned int idCounts[ID_LAST] = { 0 };
+ return ++idCounts[idType];
+}
+
+//---------------------------------------------------------
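+// Produces ids such as "_1a" or "f2" (illustrative): a one-character kind
+// prefix followed by the counter value printed in hex.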
+inline std::string getPrefixedId(unsigned uId, tIdType idType)
+{
+ static const char idPrefix[ID_LAST] = { '_', 'f' };
+ char buffer[20];
+ char* BufPtr = llvm::utohex_buffer(uId, buffer + 20);
+ *--BufPtr = idPrefix[idType];
+ return BufPtr;
+}
+
+//---------------------------------------------------------
+template<class T, class V>
+bool addToMap(T& idMap, const V& value, tIdType idType = ID_NORMAL)
+{
+ typename T::iterator i = idMap.find(value);
+ bool toAdd = i == idMap.end();
+ if (toAdd)
+ {
+ idMap.insert(typename T::value_type(value, getNewId(idType)));
+ }
+ return toAdd;
+}
+
+} // anon NS
+
+//---------------------------------------------------------
+std::string DocumentXML::escapeString(const char* pStr, std::string::size_type len)
+{
+ std::string value;
+ value.reserve(len + 1);
+ char buffer[16];
+ for (unsigned i = 0; i < len; ++i) {
+ switch (char C = pStr[i]) {
+ default:
+ if (isprint(C))
+ value += C;
+ else
+ {
+ sprintf(buffer, "\\%03o", C);
+ value += buffer;
+ }
+ break;
+
+ case '\n': value += "\\n"; break;
+ case '\t': value += "\\t"; break;
+ case '\a': value += "\\a"; break;
+ case '\b': value += "\\b"; break;
+ case '\r': value += "\\r"; break;
+
+ case '&': value += "&amp;"; break;
+ case '<': value += "&lt;"; break;
+ case '>': value += "&gt;"; break;
+ case '"': value += "&quot;"; break;
+ case '\'': value += "&apos;"; break;
+
+ }
+ }
+ return value;
+}
+
+//---------------------------------------------------------
+void DocumentXML::finalize()
+{
+ assert(CurrentNode == Root && "not completely backtracked");
+
+ addSubNode("ReferenceSection");
+ addSubNode("Types");
+
+ for (XML::IdMap<QualType>::iterator i = Types.begin(), e = Types.end(); i != e; ++i)
+ {
+ if (i->first.getCVRQualifiers() != 0)
+ {
+ addSubNode("CvQualifiedType");
+ addAttribute("id", getPrefixedId(i->second, ID_NORMAL));
+ addAttribute("type", getPrefixedId(BasicTypes[i->first.getTypePtr()], ID_NORMAL));
+ if (i->first.isConstQualified()) addAttribute("const", "1");
+ if (i->first.isVolatileQualified()) addAttribute("volatile", "1");
+ if (i->first.isRestrictQualified()) addAttribute("restrict", "1");
+ toParent();
+ }
+ }
+
+ for (XML::IdMap<const Type*>::iterator i = BasicTypes.begin(), e = BasicTypes.end(); i != e; ++i)
+ {
+ // Don't use the get methods as they strip off typedef info.
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(i->first)) {
+ addSubNode("FundamentalType");
+ addAttribute("name", BT->getName(Ctx->getLangOptions().CPlusPlus));
+ }
+ else if (const PointerType *PT = dyn_cast<PointerType>(i->first)) {
+ addSubNode("PointerType");
+ addTypeAttribute(PT->getPointeeType());
+ }
+ else if (dyn_cast<FunctionType>(i->first) != 0) {
+ addSubNode("FunctionType");
+ }
+ else if (const ReferenceType *RT = dyn_cast<ReferenceType>(i->first)) {
+ addSubNode("ReferenceType");
+ addTypeAttribute(RT->getPointeeType());
+ }
+ else if (const TypedefType * TT = dyn_cast<TypedefType>(i->first)) {
+ addSubNode("Typedef");
+ addAttribute("name", TT->getDecl()->getNameAsString());
+ addTypeAttribute(TT->getDecl()->getUnderlyingType());
+ addContextAttribute(TT->getDecl()->getDeclContext());
+ }
+ else if (const QualifiedNameType *QT = dyn_cast<QualifiedNameType>(i->first)) {
+ addSubNode("QualifiedNameType");
+ addTypeAttribute(QT->getNamedType());
+ }
+ else if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(i->first)) {
+ addSubNode("ArrayType");
+ addAttribute("min", 0);
+ addAttribute("max", (CAT->getSize() - 1).toString(10, false));
+ addTypeAttribute(CAT->getElementType());
+ }
+ else if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(i->first)) {
+ addSubNode("VariableArrayType");
+ addTypeAttribute(VAT->getElementType());
+ }
+ else if (const TagType *RET = dyn_cast<TagType>(i->first)) {
+ const TagDecl *tagDecl = RET->getDecl();
+ std::string tagKind = tagDecl->getKindName();
+ tagKind[0] = std::toupper(tagKind[0]);
+ addSubNode(tagKind);
+ addAttribute("name", tagDecl->getNameAsString());
+ addContextAttribute(tagDecl->getDeclContext());
+ }
+ else if (const VectorType* VT = dyn_cast<VectorType>(i->first)) {
+ addSubNode("VectorType");
+ addTypeAttribute(VT->getElementType());
+ addAttribute("num_elements", VT->getNumElements());
+ }
+ else
+ {
+ addSubNode("FIXMEType");
+ }
+ addAttribute("id", getPrefixedId(i->second, ID_NORMAL));
+ toParent();
+ }
+
+
+ toParent().addSubNode("Contexts");
+
+ for (XML::IdMap<const DeclContext*>::iterator i = Contexts.begin(), e = Contexts.end(); i != e; ++i)
+ {
+ addSubNode(i->first->getDeclKindName());
+ addAttribute("id", getPrefixedId(i->second, ID_NORMAL));
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(i->first)) {
+ addAttribute("name", ND->getNameAsString());
+ }
+ if (const TagDecl *TD = dyn_cast<TagDecl>(i->first)) {
+ addAttribute("type", getPrefixedId(BasicTypes[TD->getTypeForDecl()], ID_NORMAL));
+ }
+ else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(i->first)) {
+ addAttribute("type", getPrefixedId(BasicTypes[FD->getType()->getAsFunctionType()], ID_NORMAL));
+ }
+
+ if (const DeclContext* parent = i->first->getParent())
+ {
+ addContextAttribute(parent);
+ }
+ toParent();
+ }
+
+ toParent().addSubNode("Files");
+
+ for (XML::IdMap<std::string>::iterator i = SourceFiles.begin(), e = SourceFiles.end(); i != e; ++i)
+ {
+ addSubNode("File");
+ addAttribute("id", getPrefixedId(i->second, ID_FILE));
+ addAttribute("name", escapeString(i->first.c_str(), i->first.size()));
+ toParent();
+ }
+
+ toParent().toParent();
+
+ // Write the root closing node (which always has subnodes).
+ Out << "</" << CurrentNode->Name << ">\n";
+}
+
+//---------------------------------------------------------
+void DocumentXML::addTypeAttribute(const QualType& pType)
+{
+ addTypeRecursively(pType);
+ addAttribute("type", getPrefixedId(Types[pType], ID_NORMAL));
+}
+
+//---------------------------------------------------------
+void DocumentXML::addTypeIdAttribute(const Type* pType)
+{
+ addBasicTypeRecursively(pType);
+ addAttribute("id", getPrefixedId(BasicTypes[pType], ID_NORMAL));
+}
+
+//---------------------------------------------------------
+void DocumentXML::addTypeRecursively(const QualType& pType)
+{
+ if (addToMap(Types, pType))
+ {
+ addBasicTypeRecursively(pType.getTypePtr());
+ // beautifier: an unqualified type should be transparent (reuse the id of its basic type)
+ if (pType.getCVRQualifiers() == 0)
+ {
+ Types[pType] = BasicTypes[pType.getTypePtr()];
+ }
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::addBasicTypeRecursively(const Type* pType)
+{
+ if (addToMap(BasicTypes, pType))
+ {
+ if (const PointerType *PT = dyn_cast<PointerType>(pType)) {
+ addTypeRecursively(PT->getPointeeType());
+ }
+ else if (const ReferenceType *RT = dyn_cast<ReferenceType>(pType)) {
+ addTypeRecursively(RT->getPointeeType());
+ }
+ else if (const TypedefType *TT = dyn_cast<TypedefType>(pType)) {
+ addTypeRecursively(TT->getDecl()->getUnderlyingType());
+ addContextsRecursively(TT->getDecl()->getDeclContext());
+ }
+ else if (const QualifiedNameType *QT = dyn_cast<QualifiedNameType>(pType)) {
+ addTypeRecursively(QT->getNamedType());
+ // FIXME: what should be done with the NestedNameSpecifier, or should this type be transparent?
+ }
+ else if (const ArrayType *AT = dyn_cast<ArrayType>(pType)) {
+ addTypeRecursively(AT->getElementType());
+ // FIXME: doesn't work in the immediate streaming approach
+ /*if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT))
+ {
+ addSubNode("VariableArraySizeExpression");
+ PrintStmt(VAT->getSizeExpr());
+ toParent();
+ }*/
+ }
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::addContextAttribute(const DeclContext *DC, tContextUsage usage)
+{
+ addContextsRecursively(DC);
+ const char* pAttributeTags[2] = { "context", "id" };
+ addAttribute(pAttributeTags[usage], getPrefixedId(Contexts[DC], ID_NORMAL));
+}
+
+//---------------------------------------------------------
+void DocumentXML::addContextsRecursively(const DeclContext *DC)
+{
+ if (DC != 0 && addToMap(Contexts, DC))
+ {
+ addContextsRecursively(DC->getParent());
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::addSourceFileAttribute(const std::string& fileName)
+{
+ addToMap(SourceFiles, fileName, ID_FILE);
+ addAttribute("file", getPrefixedId(SourceFiles[fileName], ID_FILE));
+}
+
+//---------------------------------------------------------
+PresumedLoc DocumentXML::addLocation(const SourceLocation& Loc)
+{
+ SourceManager& SM = Ctx->getSourceManager();
+ SourceLocation SpellingLoc = SM.getSpellingLoc(Loc);
+ PresumedLoc PLoc;
+ if (!SpellingLoc.isInvalid())
+ {
+ PLoc = SM.getPresumedLoc(SpellingLoc);
+ addSourceFileAttribute(PLoc.getFilename());
+ addAttribute("line", PLoc.getLine());
+ addAttribute("col", PLoc.getColumn());
+ }
+ // else: an invalid location is not an error in some cases (e.g. CXXThisExpr)
+ return PLoc;
+}
+
+//---------------------------------------------------------
+void DocumentXML::addLocationRange(const SourceRange& R)
+{
+ PresumedLoc PStartLoc = addLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd())
+ {
+ SourceManager& SM = Ctx->getSourceManager();
+ SourceLocation SpellingLoc = SM.getSpellingLoc(R.getEnd());
+ if (!SpellingLoc.isInvalid())
+ {
+ PresumedLoc PLoc = SM.getPresumedLoc(SpellingLoc);
+ if (PStartLoc.isInvalid() ||
+ strcmp(PLoc.getFilename(), PStartLoc.getFilename()) != 0) {
+ addToMap(SourceFiles, PLoc.getFilename(), ID_FILE);
+ addAttribute("endfile", PLoc.getFilename());
+ addAttribute("endline", PLoc.getLine());
+ addAttribute("endcol", PLoc.getColumn());
+ } else if (PLoc.getLine() != PStartLoc.getLine()) {
+ addAttribute("endline", PLoc.getLine());
+ addAttribute("endcol", PLoc.getColumn());
+ } else {
+ addAttribute("endcol", PLoc.getColumn());
+ }
+ }
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::PrintFunctionDecl(FunctionDecl *FD)
+{
+ switch (FD->getStorageClass()) {
+ default: assert(0 && "Unknown storage class");
+ case FunctionDecl::None: break;
+ case FunctionDecl::Extern: addAttribute("storage_class", "extern"); break;
+ case FunctionDecl::Static: addAttribute("storage_class", "static"); break;
+ case FunctionDecl::PrivateExtern: addAttribute("storage_class", "__private_extern__"); break;
+ }
+
+ if (FD->isInline())
+ addAttribute("inline", "1");
+
+ const FunctionType *AFT = FD->getType()->getAsFunctionType();
+ addTypeAttribute(AFT->getResultType());
+ addBasicTypeRecursively(AFT);
+
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(AFT)) {
+ addAttribute("num_args", FD->getNumParams());
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
+ addSubNode("Argument");
+ ParmVarDecl *argDecl = FD->getParamDecl(i);
+ addAttribute("name", argDecl->getNameAsString());
+ addTypeAttribute(FT->getArgType(i));
+ addDeclIdAttribute(argDecl);
+ if (argDecl->getDefaultArg())
+ {
+ addAttribute("default_arg", "1");
+ PrintStmt(argDecl->getDefaultArg());
+ }
+ toParent();
+ }
+
+ if (FT->isVariadic()) {
+ addSubNode("Ellipsis").toParent();
+ }
+ } else {
+ assert(isa<FunctionNoProtoType>(AFT));
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::addRefAttribute(const NamedDecl* D)
+{
+ // FIXME: for C++ inline member functions that refer to a member defined after
+ // the function, verify that the ids are already present
+ // (this should work, but is untested).
+ if (const DeclContext* DC = dyn_cast<DeclContext>(D))
+ {
+ addAttribute("ref", getPrefixedId(Contexts[DC], ID_NORMAL));
+ }
+ else
+ {
+ addAttribute("ref", getPrefixedId(Decls[D], ID_NORMAL));
+ }
+}
+
+//---------------------------------------------------------
+void DocumentXML::addDeclIdAttribute(const NamedDecl* D)
+{
+ addToMap(Decls, D);
+ addAttribute("id", getPrefixedId(Decls[D], ID_NORMAL));
+}
+
+//---------------------------------------------------------
+void DocumentXML::PrintDecl(Decl *D)
+{
+ addSubNode(D->getDeclKindName());
+ addContextAttribute(D->getDeclContext());
+ addLocation(D->getLocation());
+ if (DeclContext* DC = dyn_cast<DeclContext>(D))
+ {
+ addContextAttribute(DC, CONTEXT_AS_ID);
+ }
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ addAttribute("name", ND->getNameAsString());
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ PrintFunctionDecl(FD);
+ if (Stmt *Body = FD->getBody(*Ctx)) {
+ addSubNode("Body");
+ PrintStmt(Body);
+ toParent();
+ }
+ } else if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ addBasicTypeRecursively(RD->getTypeForDecl());
+ addAttribute("type", getPrefixedId(BasicTypes[RD->getTypeForDecl()], ID_NORMAL));
+ if (!RD->isDefinition())
+ {
+ addAttribute("forward", "1");
+ }
+
+ for (RecordDecl::field_iterator i = RD->field_begin(*Ctx), e = RD->field_end(*Ctx); i != e; ++i)
+ {
+ PrintDecl(*i);
+ }
+ } else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ const QualType& enumType = ED->getIntegerType();
+ if (!enumType.isNull())
+ {
+ addTypeAttribute(enumType);
+ for (EnumDecl::enumerator_iterator i = ED->enumerator_begin(*Ctx), e = ED->enumerator_end(*Ctx); i != e; ++i)
+ {
+ PrintDecl(*i);
+ }
+ }
+ } else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) {
+ addTypeAttribute(ECD->getType());
+ addAttribute("value", ECD->getInitVal().toString(10, true));
+ if (ECD->getInitExpr())
+ {
+ PrintStmt(ECD->getInitExpr());
+ }
+ } else if (FieldDecl *FdD = dyn_cast<FieldDecl>(D)) {
+ addTypeAttribute(FdD->getType());
+ addDeclIdAttribute(ND);
+ if (FdD->isMutable())
+ addAttribute("mutable", "1");
+ if (FdD->isBitField())
+ {
+ addAttribute("bitfield", "1");
+ PrintStmt(FdD->getBitWidth());
+ }
+ } else if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ addTypeIdAttribute(Ctx->getTypedefType(TD).getTypePtr());
+ addTypeAttribute(TD->getUnderlyingType());
+ } else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ addTypeAttribute(VD->getType());
+ addDeclIdAttribute(ND);
+
+ VarDecl *V = dyn_cast<VarDecl>(VD);
+ if (V && V->getStorageClass() != VarDecl::None)
+ {
+ addAttribute("storage_class", VarDecl::getStorageClassSpecifierString(V->getStorageClass()));
+ }
+
+ if (V && V->getInit())
+ {
+ PrintStmt(V->getInit());
+ }
+ }
+ } else if (LinkageSpecDecl* LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ switch (LSD->getLanguage())
+ {
+ case LinkageSpecDecl::lang_c: addAttribute("lang", "C"); break;
+ case LinkageSpecDecl::lang_cxx: addAttribute("lang", "CXX"); break;
+ default: assert(0 && "Unexpected lang id");
+ }
+ } else if (isa<FileScopeAsmDecl>(D)) {
+ // FIXME: Implement this
+ } else {
+ assert(0 && "Unexpected decl");
+ }
+ toParent();
+}
+
+//---------------------------------------------------------
+} // NS clang
+
diff --git a/lib/Frontend/FixItRewriter.cpp b/lib/Frontend/FixItRewriter.cpp
new file mode 100644
index 0000000..1ed89d7
--- /dev/null
+++ b/lib/Frontend/FixItRewriter.cpp
@@ -0,0 +1,199 @@
+//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a diagnostic client adaptor that performs rewrites as
+// suggested by code modification hints attached to diagnostics. It
+// then forwards any diagnostics to the adapted diagnostic client.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FixItRewriter.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+using namespace clang;
+
+FixItRewriter::FixItRewriter(Diagnostic &Diags, SourceManager &SourceMgr,
+ const LangOptions &LangOpts)
+ : Diags(Diags), Rewrite(SourceMgr, LangOpts), NumFailures(0) {
+ Client = Diags.getClient();
+ Diags.setClient(this);
+}
+
+FixItRewriter::~FixItRewriter() {
+ Diags.setClient(Client);
+}
+
+bool FixItRewriter::WriteFixedFile(const std::string &InFileName,
+ const std::string &OutFileName) {
+ if (NumFailures > 0) {
+ Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
+ return true;
+ }
+
+ llvm::OwningPtr<llvm::raw_ostream> OwnedStream;
+ llvm::raw_ostream *OutFile;
+ if (!OutFileName.empty()) {
+ std::string Err;
+ OutFile = new llvm::raw_fd_ostream(OutFileName.c_str(),
+ // set binary mode (critical for Windoze)
+ true,
+ Err);
+ OwnedStream.reset(OutFile);
+ } else if (InFileName == "-") {
+ OutFile = &llvm::outs();
+ } else {
+ llvm::sys::Path Path(InFileName);
+ std::string Suffix = Path.getSuffix();
+ Path.eraseSuffix();
+ Path.appendSuffix("fixit." + Suffix);
+ std::string Err;
+ OutFile = new llvm::raw_fd_ostream(Path.toString().c_str(),
+ // set binary mode (critical for Windoze)
+ true,
+ Err);
+ OwnedStream.reset(OutFile);
+ }
+
+ FileID MainFileID = Rewrite.getSourceMgr().getMainFileID();
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ std::fprintf(stderr, "Main file is unchanged\n");
+ }
+ OutFile->flush();
+
+ return false;
+}
+
+bool FixItRewriter::IncludeInDiagnosticCounts() const {
+ return Client? Client->IncludeInDiagnosticCounts() : true;
+}
+
+void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
+ const DiagnosticInfo &Info) {
+ Client->HandleDiagnostic(DiagLevel, Info);
+
+ // Skip over any diagnostics that are ignored.
+ if (DiagLevel == Diagnostic::Ignored)
+ return;
+
+ if (!FixItLocations.empty()) {
+ // The user has specified the locations where we should perform
+ // the various fix-it modifications.
+
+ // If this diagnostic does not have any code modifications,
+ // completely ignore it, even if it's an error: fix-it locations
+ // are meant to perform specific fix-ups even in the presence of
+ // other errors.
+ if (Info.getNumCodeModificationHints() == 0)
+ return;
+
+ // See if the location of the error is one that matches what the
+ // user requested.
+ bool AcceptableLocation = false;
+ const FileEntry *File
+ = Rewrite.getSourceMgr().getFileEntryForID(
+ Info.getLocation().getFileID());
+ unsigned Line = Info.getLocation().getSpellingLineNumber();
+ unsigned Column = Info.getLocation().getSpellingColumnNumber();
+ for (llvm::SmallVector<RequestedSourceLocation, 4>::iterator
+ Loc = FixItLocations.begin(), LocEnd = FixItLocations.end();
+ Loc != LocEnd; ++Loc) {
+ if (Loc->File == File && Loc->Line == Line && Loc->Column == Column) {
+ AcceptableLocation = true;
+ break;
+ }
+ }
+
+ if (!AcceptableLocation)
+ return;
+ }
+
+ // Make sure that we can perform all of the modifications proposed
+ // in this diagnostic.
+ bool CanRewrite = Info.getNumCodeModificationHints() > 0;
+ for (unsigned Idx = 0, Last = Info.getNumCodeModificationHints();
+ Idx < Last; ++Idx) {
+ const CodeModificationHint &Hint = Info.getCodeModificationHint(Idx);
+ if (Hint.RemoveRange.isValid() &&
+ Rewrite.getRangeSize(Hint.RemoveRange) == -1) {
+ CanRewrite = false;
+ break;
+ }
+
+ if (Hint.InsertionLoc.isValid() &&
+ !Rewrite.isRewritable(Hint.InsertionLoc)) {
+ CanRewrite = false;
+ break;
+ }
+ }
+
+ if (!CanRewrite) {
+ if (Info.getNumCodeModificationHints() > 0)
+ Diag(Info.getLocation(), diag::note_fixit_in_macro);
+
+ // If this was an error, refuse to perform any rewriting.
+ if (DiagLevel == Diagnostic::Error || DiagLevel == Diagnostic::Fatal) {
+ if (++NumFailures == 1)
+ Diag(Info.getLocation(), diag::note_fixit_unfixed_error);
+ }
+ return;
+ }
+
+ bool Failed = false;
+ for (unsigned Idx = 0, Last = Info.getNumCodeModificationHints();
+ Idx < Last; ++Idx) {
+ const CodeModificationHint &Hint = Info.getCodeModificationHint(Idx);
+ if (!Hint.RemoveRange.isValid()) {
+ // We're adding code.
+ if (Rewrite.InsertStrBefore(Hint.InsertionLoc, Hint.CodeToInsert))
+ Failed = true;
+ continue;
+ }
+
+ if (Hint.CodeToInsert.empty()) {
+ // We're removing code.
+ if (Rewrite.RemoveText(Hint.RemoveRange.getBegin(),
+ Rewrite.getRangeSize(Hint.RemoveRange)))
+ Failed = true;
+ continue;
+ }
+
+ // We're replacing code.
+ if (Rewrite.ReplaceText(Hint.RemoveRange.getBegin(),
+ Rewrite.getRangeSize(Hint.RemoveRange),
+ Hint.CodeToInsert.c_str(),
+ Hint.CodeToInsert.size()))
+ Failed = true;
+ }
+
+ if (Failed) {
+ ++NumFailures;
+ Diag(Info.getLocation(), diag::note_fixit_failed);
+ return;
+ }
+
+ Diag(Info.getLocation(), diag::note_fixit_applied);
+}
+
+/// \brief Emit a diagnostic via the adapted diagnostic client.
+void FixItRewriter::Diag(FullSourceLoc Loc, unsigned DiagID) {
+ // When producing this diagnostic, we temporarily bypass ourselves,
+ // clear out any current diagnostic, and let the downstream client
+ // format the diagnostic.
+ Diags.setClient(Client);
+ Diags.Clear();
+ Diags.Report(Loc, DiagID);
+ Diags.setClient(this);
+}
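
A minimal usage sketch for the rewriter above. The parse call and the variable names are assumptions for illustration; the constructor, WriteFixedFile(), and the save/restore of the diagnostic client are taken from this file.

    // Hypothetical driver wiring (only FixItRewriter's own API comes from this patch).
    {
      FixItRewriter Fixer(Diags, SourceMgr, LangOpts); // installs itself as the diagnostic client
      // ... run the compilation that emits the diagnostics being fixed ...
      // "-" as the input name writes to stdout; an empty output name derives
      // "<input>.fixit.<suffix>". A true return means no fixed file was written.
      Fixer.WriteFixedFile(InFile, /*OutFileName=*/"");
    } // the destructor restores the previous diagnostic client
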
diff --git a/lib/Frontend/GeneratePCH.cpp b/lib/Frontend/GeneratePCH.cpp
new file mode 100644
index 0000000..8be88ce
--- /dev/null
+++ b/lib/Frontend/GeneratePCH.cpp
@@ -0,0 +1,78 @@
+//===--- GeneratePCH.cpp - AST Consumer for PCH Generation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CreatePCHGenerator function, which creates an
+// ASTConsumer that generates a PCH file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/PCHWriter.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/System/Path.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Streams.h"
+#include <string>
+
+using namespace clang;
+using namespace llvm;
+
+namespace {
+ class VISIBILITY_HIDDEN PCHGenerator : public SemaConsumer {
+ const Preprocessor &PP;
+ llvm::raw_ostream *Out;
+ Sema *SemaPtr;
+ MemorizeStatCalls *StatCalls; // owned by the FileManager
+
+ public:
+ explicit PCHGenerator(const Preprocessor &PP, llvm::raw_ostream *Out);
+ virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+ };
+}
+
+PCHGenerator::PCHGenerator(const Preprocessor &PP, llvm::raw_ostream *OS)
+ : PP(PP), Out(OS), SemaPtr(0), StatCalls(0) {
+
+ // Install a stat() listener to keep track of all of the stat()
+ // calls.
+ StatCalls = new MemorizeStatCalls;
+ PP.getFileManager().setStatCache(StatCalls);
+}
+
+void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
+ if (PP.getDiagnostics().hasErrorOccurred())
+ return;
+
+ // Write the PCH contents into a buffer
+ std::vector<unsigned char> Buffer;
+ BitstreamWriter Stream(Buffer);
+ PCHWriter Writer(Stream);
+
+ // Emit the PCH file
+ assert(SemaPtr && "No Sema?");
+ Writer.WritePCH(*SemaPtr, StatCalls);
+
+ // Write the generated bitstream to "Out".
+ Out->write((char *)&Buffer.front(), Buffer.size());
+
+ // Make sure it hits disk now.
+ Out->flush();
+}
+
+ASTConsumer *clang::CreatePCHGenerator(const Preprocessor &PP,
+ llvm::raw_ostream *OS) {
+ return new PCHGenerator(PP, OS);
+}
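
A short sketch of driving the factory above; the output-file setup and the parse step are assumptions, while CreatePCHGenerator and the raw_fd_ostream constructor used here follow the code in this patch.

    // Hypothetical caller; "OutputFile" is an assumed name.
    std::string Err;
    llvm::raw_fd_ostream OS(OutputFile.c_str(), /*Binary=*/true, Err);
    ASTConsumer *Consumer = CreatePCHGenerator(PP, &OS);
    // ... parse the translation unit; HandleTranslationUnit() then writes the PCH ...
    delete Consumer;
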
diff --git a/lib/Frontend/HTMLDiagnostics.cpp b/lib/Frontend/HTMLDiagnostics.cpp
new file mode 100644
index 0000000..9cfe0b2
--- /dev/null
+++ b/lib/Frontend/HTMLDiagnostics.cpp
@@ -0,0 +1,602 @@
+//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+#include <fstream>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Boilerplate.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class VISIBILITY_HIDDEN HTMLDiagnostics : public PathDiagnosticClient {
+ llvm::sys::Path Directory, FilePrefix;
+ bool createdDir, noDir;
+ Preprocessor* PP;
+ std::vector<const PathDiagnostic*> BatchedDiags;
+public:
+ HTMLDiagnostics(const std::string& prefix, Preprocessor* pp);
+
+ virtual ~HTMLDiagnostics();
+
+ virtual void SetPreprocessor(Preprocessor *pp) { PP = pp; }
+
+ virtual void HandlePathDiagnostic(const PathDiagnostic* D);
+
+ unsigned ProcessMacroPiece(llvm::raw_ostream& os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num);
+
+ void HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P, unsigned num, unsigned max);
+
+ void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
+ const char *HighlightStart = "<span class=\"mrange\">",
+ const char *HighlightEnd = "</span>");
+
+ void ReportDiag(const PathDiagnostic& D);
+};
+
+} // end anonymous namespace
+
+HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix, Preprocessor* pp)
+ : Directory(prefix), FilePrefix(prefix), createdDir(false), noDir(false),
+ PP(pp) {
+
+ // All html files begin with "report"
+ FilePrefix.appendComponent("report");
+}
+
+PathDiagnosticClient*
+clang::CreateHTMLDiagnosticClient(const std::string& prefix, Preprocessor* PP,
+ PreprocessorFactory*) {
+ return new HTMLDiagnostics(prefix, PP);
+}
+
+//===----------------------------------------------------------------------===//
+// Report processing.
+//===----------------------------------------------------------------------===//
+
+void HTMLDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) {
+ if (!D)
+ return;
+
+ if (D->empty()) {
+ delete D;
+ return;
+ }
+
+ const_cast<PathDiagnostic*>(D)->flattenLocations();
+ BatchedDiags.push_back(D);
+}
+
+HTMLDiagnostics::~HTMLDiagnostics() {
+ while (!BatchedDiags.empty()) {
+ const PathDiagnostic* D = BatchedDiags.back();
+ BatchedDiags.pop_back();
+ ReportDiag(*D);
+ delete D;
+ }
+}
+
+void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D) {
+ // Create the HTML directory if it is missing.
+ if (!createdDir) {
+ createdDir = true;
+ std::string ErrorMsg;
+ Directory.createDirectoryOnDisk(true, &ErrorMsg);
+
+ if (!Directory.isDirectory()) {
+ llvm::cerr << "warning: could not create directory '"
+ << Directory.toString() << "'\n"
+ << "reason: " << ErrorMsg << '\n';
+
+ noDir = true;
+
+ return;
+ }
+ }
+
+ if (noDir)
+ return;
+
+ const SourceManager &SMgr = D.begin()->getLocation().getManager();
+ FileID FID;
+
+ // Verify that the entire path is from the same FileID.
+ for (PathDiagnostic::const_iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ FullSourceLoc L = I->getLocation().asLocation().getInstantiationLoc();
+
+ if (FID.isInvalid()) {
+ FID = SMgr.getFileID(L);
+ } else if (SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+
+ // Check the source ranges.
+ for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
+ RE=I->ranges_end(); RI!=RE; ++RI) {
+
+ SourceLocation L = SMgr.getInstantiationLoc(RI->getBegin());
+
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+
+ L = SMgr.getInstantiationLoc(RI->getEnd());
+
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ }
+ }
+
+ if (FID.isInvalid())
+ return; // FIXME: Emit a warning?
+
+ // Create a new rewriter to generate HTML.
+ Rewriter R(const_cast<SourceManager&>(SMgr), PP->getLangOptions());
+
+ // Process the path.
+ unsigned n = D.size();
+ unsigned max = n;
+
+ for (PathDiagnostic::const_reverse_iterator I=D.rbegin(), E=D.rend();
+ I!=E; ++I, --n)
+ HandlePiece(R, FID, *I, n, max);
+
+ // Add line numbers, header, footer, etc.
+
+ // unsigned FID = R.getSourceMgr().getMainFileID();
+ html::EscapeText(R, FID);
+ html::AddLineNumbers(R, FID);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ if (PP) html::SyntaxHighlight(R, FID, *PP);
+
+ // FIXME: We eventually want to use PPF to create a fresh Preprocessor,
+ // once we have worked out the bugs.
+ //
+ // if (PPF) html::HighlightMacros(R, FID, *PPF);
+ //
+ if (PP) html::HighlightMacros(R, FID, *PP);
+
+ // Get the full directory name of the analyzed file.
+
+ const FileEntry* Entry = SMgr.getFileEntryForID(FID);
+
+ // This is a kludge; basically we want to prepend the full working
+ // directory if the file entry has no directory information. This is
+ // a work in progress.
+
+ std::string DirName = "";
+
+ if (!llvm::sys::Path(Entry->getName()).isAbsolute()) {
+ llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+ DirName = P.toString() + "/";
+ }
+
+ // Add the name of the file as an <h1> tag.
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+
+ os << "<!-- REPORTHEADER -->\n"
+ << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+ "<tr><td class=\"rowname\">File:</td><td>"
+ << html::EscapeText(DirName)
+ << html::EscapeText(Entry->getName())
+ << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
+ "<a href=\"#EndPath\">line "
+ << (*D.rbegin()).getLocation().asLocation().getInstantiationLineNumber()
+ << ", column "
+ << (*D.rbegin()).getLocation().asLocation().getInstantiationColumnNumber()
+ << "</a></td></tr>\n"
+ "<tr><td class=\"rowname\">Description:</td><td>"
+ << D.getDescription() << "</td></tr>\n";
+
+ // Output any other meta data.
+
+ for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end();
+ I!=E; ++I) {
+ os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+ }
+
+ os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n"
+ "<h3>Annotated Source Code</h3>\n";
+
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Embed meta-data tags.
+
+ const std::string& BugDesc = D.getDescription();
+
+ if (!BugDesc.empty()) {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGDESC " << BugDesc << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ const std::string& BugType = D.getBugType();
+ if (!BugType.empty()) {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGTYPE " << BugType << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ const std::string& BugCategory = D.getCategory();
+
+ if (!BugCategory.empty()) {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGLINE "
+ << D.back()->getLocation().asLocation().getInstantiationLineNumber()
+ << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "\n<!-- BUGPATHLENGTH " << D.size() << " -->\n";
+ R.InsertStrBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Add CSS, header, and footer.
+
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
+
+ // Get the rewrite buffer.
+ const RewriteBuffer *Buf = R.getRewriteBufferFor(FID);
+
+ if (!Buf) {
+ llvm::cerr << "warning: no diagnostics generated for main file.\n";
+ return;
+ }
+
+ // Create the stream to write out the HTML.
+ std::ofstream os;
+
+ {
+ // Create a path for the target HTML file.
+ llvm::sys::Path F(FilePrefix);
+ F.makeUnique(false, NULL);
+
+ // Rename the file with an HTML extension.
+ llvm::sys::Path H(F);
+ H.appendSuffix("html");
+ F.renamePathOnDisk(H, NULL);
+
+ os.open(H.toString().c_str());
+
+ if (!os) {
+ llvm::cerr << "warning: could not create file '" << F.toString() << "'\n";
+ return;
+ }
+ }
+
+ // Emit the HTML to disk.
+
+ for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
+ os << *I;
+}
+
+void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P,
+ unsigned num, unsigned max) {
+
+ // For now, just draw a box above the line in question, and emit the
+ // warning.
+ FullSourceLoc Pos = P.getLocation().asLocation();
+
+ if (!Pos.isValid())
+ return;
+
+ SourceManager &SM = R.getSourceMgr();
+ assert(&Pos.getManager() == &SM && "SourceManagers are different!");
+ std::pair<FileID, unsigned> LPosInfo = SM.getDecomposedInstantiationLoc(Pos);
+
+ if (LPosInfo.first != BugFileID)
+ return;
+
+ const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first);
+ const char* FileStart = Buf->getBufferStart();
+
+ // Compute the column number. Rewind from the current position to the start
+ // of the line.
+ unsigned ColNo = SM.getColumnNumber(LPosInfo.first, LPosInfo.second);
+ const char *TokInstantiationPtr =Pos.getInstantiationLoc().getCharacterData();
+ const char *LineStart = TokInstantiationPtr-ColNo;
+
+ // Compute LineEnd.
+ const char *LineEnd = TokInstantiationPtr;
+ const char* FileEnd = Buf->getBufferEnd();
+ while (*LineEnd != '\n' && LineEnd != FileEnd)
+ ++LineEnd;
+
+ // Compute the margin offset by counting tabs and non-tabs.
+ unsigned PosNo = 0;
+ for (const char* c = LineStart; c != TokInstantiationPtr; ++c)
+ PosNo += *c == '\t' ? 8 : 1;
+
+ // Create the html for the message.
+
+ const char *Kind = 0;
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::Event: Kind = "Event"; break;
+ case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
+ // Setting Kind to "Control" is intentional.
+ case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+ }
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
+
+ if (num == max)
+ os << "EndPath";
+ else
+ os << "Path" << num;
+
+ os << "\" class=\"msg";
+ if (Kind)
+ os << " msg" << Kind;
+ os << "\" style=\"margin-left:" << PosNo << "ex";
+
+ // Output a maximum size.
+ if (!isa<PathDiagnosticMacroPiece>(P)) {
+ // Get the string and determine the length of its longest token.
+ const std::string& Msg = P.getString();
+ unsigned max_token = 0;
+ unsigned cnt = 0;
+ unsigned len = Msg.size();
+
+ for (std::string::const_iterator I=Msg.begin(), E=Msg.end(); I!=E; ++I)
+ switch (*I) {
+ default:
+ ++cnt;
+ continue;
+ case ' ':
+ case '\t':
+ case '\n':
+ if (cnt > max_token) max_token = cnt;
+ cnt = 0;
+ }
+
+ if (cnt > max_token)
+ max_token = cnt;
+
+ // Determine the approximate size of the message bubble in em.
+ unsigned em;
+ const unsigned max_line = 120;
+
+ if (max_token >= max_line)
+ em = max_token / 2;
+ else {
+ unsigned characters = max_line;
+ unsigned lines = len / max_line;
+
+ if (lines > 0) {
+ for (; characters > max_token; --characters)
+ if (len / characters > lines) {
+ ++characters;
+ break;
+ }
+ }
+
+ em = characters / 2;
+ }
+
+ if (em < max_line/2)
+ os << "; max-width:" << em << "em";
+ }
+ else
+ os << "; max-width:100em";
+
+ os << "\">";
+
+ if (max > 1) {
+ os << "<table class=\"msgT\"><tr><td valign=\"top\">";
+ os << "<div class=\"PathIndex";
+ if (Kind) os << " PathIndex" << Kind;
+ os << "\">" << num << "</div>";
+ os << "</td><td>";
+ }
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(&P)) {
+
+ os << "Within the expansion of the macro '";
+
+ // Get the name of the macro by relexing it.
+ {
+ FullSourceLoc L = MP->getLocation().asLocation().getInstantiationLoc();
+ assert(L.isFileID());
+ std::pair<const char*, const char*> BufferInfo = L.getBufferData();
+ const char* MacroName = L.getDecomposedLoc().second + BufferInfo.first;
+ Lexer rawLexer(L, PP->getLangOptions(), BufferInfo.first,
+ MacroName, BufferInfo.second);
+
+ Token TheTok;
+ rawLexer.LexFromRawLexer(TheTok);
+ for (unsigned i = 0, n = TheTok.getLength(); i < n; ++i)
+ os << MacroName[i];
+ }
+
+ os << "':\n";
+
+ if (max > 1)
+ os << "</td></tr></table>";
+
+ // Within a macro piece. Write out each event.
+ ProcessMacroPiece(os, *MP, 0);
+ }
+ else {
+ os << html::EscapeText(P.getString());
+
+ if (max > 1)
+ os << "</td></tr></table>";
+ }
+
+ os << "</div></td></tr>";
+
+ // Insert the new html.
+ unsigned DisplayPos = LineEnd - FileStart;
+ SourceLocation Loc =
+ SM.getLocForStartOfFile(LPosInfo.first).getFileLocWithOffset(DisplayPos);
+
+ R.InsertStrBefore(Loc, os.str());
+
+ // Now highlight the ranges.
+ for (const SourceRange *I = P.ranges_begin(), *E = P.ranges_end();
+ I != E; ++I)
+ HighlightRange(R, LPosInfo.first, *I);
+
+#if 0
+ // If there is a code insertion hint, insert that code.
+ // FIXME: This code is disabled because it seems to mangle the HTML
+ // output. I'm leaving it here because it's generally the right idea,
+ // but needs some help from someone more familiar with the rewriter.
+ for (const CodeModificationHint *Hint = P.code_modifications_begin(),
+ *HintEnd = P.code_modifications_end();
+ Hint != HintEnd; ++Hint) {
+ if (Hint->RemoveRange.isValid()) {
+ HighlightRange(R, LPosInfo.first, Hint->RemoveRange,
+ "<span class=\"CodeRemovalHint\">", "</span>");
+ }
+ if (Hint->InsertionLoc.isValid()) {
+ std::string EscapedCode = html::EscapeText(Hint->CodeToInsert, true);
+ EscapedCode = "<span class=\"CodeInsertionHint\">" + EscapedCode
+ + "</span>";
+ R.InsertStrBefore(Hint->InsertionLoc, EscapedCode);
+ }
+ }
+#endif
+}
+
+static void EmitAlphaCounter(llvm::raw_ostream& os, unsigned n) {
+ llvm::SmallVector<char, 10> buf;
+
+ do {
+ unsigned x = n % ('z' - 'a');
+ buf.push_back('a' + x);
+ n = n / ('z' - 'a');
+ } while (n);
+
+ assert(!buf.empty());
+
+ for (llvm::SmallVectorImpl<char>::reverse_iterator I=buf.rbegin(),
+ E=buf.rend(); I!=E; ++I)
+ os << *I;
+}
+
+unsigned HTMLDiagnostics::ProcessMacroPiece(llvm::raw_ostream& os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num) {
+
+ for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+ I!=E; ++I) {
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(*I)) {
+ num = ProcessMacroPiece(os, *MP, num);
+ continue;
+ }
+
+ if (PathDiagnosticEventPiece *EP = dyn_cast<PathDiagnosticEventPiece>(*I)) {
+ os << "<div class=\"msg msgEvent\" style=\"width:94%; "
+ "margin-left:5px\">"
+ "<table class=\"msgT\"><tr>"
+ "<td valign=\"top\"><div class=\"PathIndex PathIndexEvent\">";
+ EmitAlphaCounter(os, num++);
+ os << "</div></td><td valign=\"top\">"
+ << html::EscapeText(EP->getString())
+ << "</td></tr></table></div>\n";
+ }
+ }
+
+ return num;
+}
+
+void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
+ SourceRange Range,
+ const char *HighlightStart,
+ const char *HighlightEnd) {
+ SourceManager &SM = R.getSourceMgr();
+ const LangOptions &LangOpts = R.getLangOpts();
+
+ SourceLocation InstantiationStart = SM.getInstantiationLoc(Range.getBegin());
+ unsigned StartLineNo = SM.getInstantiationLineNumber(InstantiationStart);
+
+ SourceLocation InstantiationEnd = SM.getInstantiationLoc(Range.getEnd());
+ unsigned EndLineNo = SM.getInstantiationLineNumber(InstantiationEnd);
+
+ if (EndLineNo < StartLineNo)
+ return;
+
+ if (SM.getFileID(InstantiationStart) != BugFileID ||
+ SM.getFileID(InstantiationEnd) != BugFileID)
+ return;
+
+ // Compute the column number of the end.
+ unsigned EndColNo = SM.getInstantiationColumnNumber(InstantiationEnd);
+ unsigned OldEndColNo = EndColNo;
+
+ if (EndColNo) {
+ // Add in the length of the token, so that we cover multi-char tokens.
+ EndColNo += Lexer::MeasureTokenLength(Range.getEnd(), SM, LangOpts)-1;
+ }
+
+ // Highlight the range. Make the span tag the outermost tag for the
+ // selected range.
+
+ SourceLocation E =
+ InstantiationEnd.getFileLocWithOffset(EndColNo - OldEndColNo);
+
+ html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
+}
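
A small wiring sketch for the client above; the analyzer hookup is an assumption, while the factory signature and the flush-on-destruction behaviour come from this file.

    // Hypothetical wiring; "OutputDir" is an assumed name.
    llvm::OwningPtr<PathDiagnosticClient>
      PD(CreateHTMLDiagnosticClient(OutputDir, &PP, /*PreprocessorFactory*/0));
    // ... hand PD to the path-sensitive checks, which call HandlePathDiagnostic() ...
    // Batched reports are written as report*.html under OutputDir when PD is destroyed.
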
diff --git a/lib/Frontend/HTMLPrint.cpp b/lib/Frontend/HTMLPrint.cpp
new file mode 100644
index 0000000..d5eb9fb
--- /dev/null
+++ b/lib/Frontend/HTMLPrint.cpp
@@ -0,0 +1,92 @@
+//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Pretty-printing of source code to HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Functional HTML pretty-printing.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class HTMLPrinter : public ASTConsumer {
+ Rewriter R;
+ llvm::raw_ostream *Out;
+ Diagnostic &Diags;
+ Preprocessor *PP;
+ PreprocessorFactory *PPF;
+ public:
+ HTMLPrinter(llvm::raw_ostream *OS, Diagnostic &D, Preprocessor *pp,
+ PreprocessorFactory* ppf)
+ : Out(OS), Diags(D), PP(pp), PPF(ppf) {}
+ virtual ~HTMLPrinter();
+
+ void Initialize(ASTContext &context);
+ };
+}
+
+ASTConsumer* clang::CreateHTMLPrinter(llvm::raw_ostream *OS,
+ Diagnostic &D, Preprocessor *PP,
+ PreprocessorFactory* PPF) {
+
+ return new HTMLPrinter(OS, D, PP, PPF);
+}
+
+void HTMLPrinter::Initialize(ASTContext &context) {
+ R.setSourceMgr(context.getSourceManager(), context.getLangOptions());
+}
+
+HTMLPrinter::~HTMLPrinter() {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Format the file.
+ FileID FID = R.getSourceMgr().getMainFileID();
+ const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
+ const char* Name;
+ // In some cases, in particular the case where the input is from stdin,
+ // there is no entry. Fall back to the memory buffer for a name in those
+ // cases.
+ if (Entry)
+ Name = Entry->getName();
+ else
+ Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier();
+
+ html::AddLineNumbers(R, FID);
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ if (PP) html::SyntaxHighlight(R, FID, *PP);
+ if (PPF) html::HighlightMacros(R, FID, *PP);
+ html::EscapeText(R, FID, false, true);
+
+ // Emit the HTML.
+ const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
+ char *Buffer = (char*)malloc(RewriteBuf.size());
+ std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer);
+ Out->write(Buffer, RewriteBuf.size());
+ free(Buffer);
+}
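
The printer above is created the same way; the parse step is assumed, and only the factory signature comes from this file.

    // Hypothetical caller.
    ASTConsumer *C = CreateHTMLPrinter(&llvm::outs(), Diags, &PP, /*PPF=*/0);
    // ... parse the file; the annotated HTML is emitted when C is destroyed ...
    delete C;
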
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
new file mode 100644
index 0000000..6383c20
--- /dev/null
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -0,0 +1,327 @@
+//===--- InitHeaderSearch.cpp - Initialize header search paths ----------*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the InitHeaderSearch class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/InitHeaderSearch.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/System/Path.h"
+#include "llvm/Config/config.h"
+#include <cstdio>
+#include <vector>
+using namespace clang;
+
+void InitHeaderSearch::AddPath(const std::string &Path, IncludeDirGroup Group,
+ bool isCXXAware, bool isUserSupplied,
+ bool isFramework, bool IgnoreSysRoot) {
+ assert(!Path.empty() && "can't handle empty path here");
+ FileManager &FM = Headers.getFileMgr();
+
+ // Compute the actual path, taking into consideration -isysroot.
+ llvm::SmallString<256> MappedPath;
+
+ // Handle isysroot.
+ if (Group == System && !IgnoreSysRoot) {
+ // FIXME: Portability. This should be a sys::Path interface, this doesn't
+ // handle things like C:\ right, nor win32 \\network\device\blah.
+ if (isysroot.size() != 1 || isysroot[0] != '/') // Add isysroot if present.
+ MappedPath.append(isysroot.begin(), isysroot.end());
+ }
+
+ MappedPath.append(Path.begin(), Path.end());
+
+ // Compute the DirectoryLookup type.
+ SrcMgr::CharacteristicKind Type;
+ if (Group == Quoted || Group == Angled)
+ Type = SrcMgr::C_User;
+ else if (isCXXAware)
+ Type = SrcMgr::C_System;
+ else
+ Type = SrcMgr::C_ExternCSystem;
+
+
+ // If the directory exists, add it.
+ if (const DirectoryEntry *DE = FM.getDirectory(&MappedPath[0],
+ &MappedPath[0]+
+ MappedPath.size())) {
+ IncludeGroup[Group].push_back(DirectoryLookup(DE, Type, isUserSupplied,
+ isFramework));
+ return;
+ }
+
+ // Check to see if this is an apple-style headermap (which are not allowed to
+ // be frameworks).
+ if (!isFramework) {
+ if (const FileEntry *FE = FM.getFile(&MappedPath[0],
+ &MappedPath[0]+MappedPath.size())) {
+ if (const HeaderMap *HM = Headers.CreateHeaderMap(FE)) {
+ // It is a headermap, add it to the search path.
+ IncludeGroup[Group].push_back(DirectoryLookup(HM, Type,isUserSupplied));
+ return;
+ }
+ }
+ }
+
+ if (Verbose)
+ fprintf(stderr, "ignoring nonexistent directory \"%s\"\n",
+ MappedPath.c_str());
+}
+
+
+void InitHeaderSearch::AddEnvVarPaths(const char *Name) {
+ const char* at = getenv(Name);
+ if (!at || *at == 0) // Empty string should not add '.' path.
+ return;
+
+ const char* delim = strchr(at, llvm::sys::PathSeparator);
+ while (delim != 0) {
+ if (delim-at == 0)
+ AddPath(".", Angled, false, true, false);
+ else
+ AddPath(std::string(at, std::string::size_type(delim-at)), Angled, false,
+ true, false);
+ at = delim + 1;
+ delim = strchr(at, llvm::sys::PathSeparator);
+ }
+ if (*at == 0)
+ AddPath(".", Angled, false, true, false);
+ else
+ AddPath(at, Angled, false, true, false);
+}
+
+
+void InitHeaderSearch::AddDefaultSystemIncludePaths(const LangOptions &Lang) {
+ // FIXME: temporary hack: hard-coded paths.
+ // FIXME: get these from the target?
+
+#ifdef LLVM_ON_WIN32
+ if (Lang.CPlusPlus) {
+ // Mingw32 GCC version 4
+ AddPath("c:/mingw/lib/gcc/mingw32/4.3.0/include/c++",
+ System, true, false, false);
+ AddPath("c:/mingw/lib/gcc/mingw32/4.3.0/include/c++/mingw32",
+ System, true, false, false);
+ AddPath("c:/mingw/lib/gcc/mingw32/4.3.0/include/c++/backward",
+ System, true, false, false);
+ }
+
+ // Mingw32 GCC version 4
+ AddPath("C:/mingw/include", System, false, false, false);
+#else
+
+ if (Lang.CPlusPlus) {
+ AddPath("/usr/include/c++/4.2.1", System, true, false, false);
+ AddPath("/usr/include/c++/4.2.1/i686-apple-darwin10", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.2.1/backward", System, true, false, false);
+
+ AddPath("/usr/include/c++/4.0.0", System, true, false, false);
+ AddPath("/usr/include/c++/4.0.0/i686-apple-darwin8", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.0.0/backward", System, true, false, false);
+
+ // Ubuntu 7.10 - Gutsy Gibbon
+ AddPath("/usr/include/c++/4.1.3", System, true, false, false);
+ AddPath("/usr/include/c++/4.1.3/i486-linux-gnu", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.1.3/backward", System, true, false, false);
+
+ // Fedora 8
+ AddPath("/usr/include/c++/4.1.2", System, true, false, false);
+ AddPath("/usr/include/c++/4.1.2/i386-redhat-linux", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.1.2/backward", System, true, false, false);
+
+ // Fedora 9
+ AddPath("/usr/include/c++/4.3.0", System, true, false, false);
+ AddPath("/usr/include/c++/4.3.0/i386-redhat-linux", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.3.0/backward", System, true, false, false);
+
+ // Fedora 10
+ AddPath("/usr/include/c++/4.3.2", System, true, false, false);
+ AddPath("/usr/include/c++/4.3.2/i386-redhat-linux", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.3.2/backward", System, true, false, false);
+
+ // Arch Linux 2008-06-24
+ AddPath("/usr/include/c++/4.3.1", System, true, false, false);
+ AddPath("/usr/include/c++/4.3.1/i686-pc-linux-gnu", System, true, false,
+ false);
+ AddPath("/usr/include/c++/4.3.1/backward", System, true, false, false);
+ AddPath("/usr/include/c++/4.3.1/x86_64-unknown-linux-gnu", System, true,
+ false, false);
+
+ // Gentoo x86 stable
+ AddPath("/usr/lib/gcc/i686-pc-linux-gnu/4.1.2/include/g++-v4", System,
+ true, false, false);
+ AddPath("/usr/lib/gcc/i686-pc-linux-gnu/4.1.2/include/g++-v4/"
+ "i686-pc-linux-gnu", System, true, false, false);
+ AddPath("/usr/lib/gcc/i686-pc-linux-gnu/4.1.2/include/g++-v4/backward",
+ System, true, false, false);
+
+ // DragonFly
+ AddPath("/usr/include/c++/4.1", System, true, false, false);
+
+ // FreeBSD
+ AddPath("/usr/include/c++/4.2", System, true, false, false);
+ }
+
+ AddPath("/usr/local/include", System, false, false, false);
+
+ AddPath("/usr/include", System, false, false, false);
+ AddPath("/System/Library/Frameworks", System, true, false, true);
+ AddPath("/Library/Frameworks", System, true, false, true);
+#endif
+}
+
+void InitHeaderSearch::AddDefaultEnvVarPaths(const LangOptions &Lang) {
+ AddEnvVarPaths("CPATH");
+ if (Lang.CPlusPlus && Lang.ObjC1)
+ AddEnvVarPaths("OBJCPLUS_INCLUDE_PATH");
+ else if (Lang.CPlusPlus)
+ AddEnvVarPaths("CPLUS_INCLUDE_PATH");
+ else if (Lang.ObjC1)
+ AddEnvVarPaths("OBJC_INCLUDE_PATH");
+ else
+ AddEnvVarPaths("C_INCLUDE_PATH");
+}
+
+
+/// RemoveDuplicates - If there are duplicate directory entries in the specified
+/// search list, remove the later (dead) ones.
+static void RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
+ bool Verbose) {
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenDirs;
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenFrameworkDirs;
+ llvm::SmallPtrSet<const HeaderMap *, 8> SeenHeaderMaps;
+ for (unsigned i = 0; i != SearchList.size(); ++i) {
+ unsigned DirToRemove = i;
+
+ const DirectoryLookup &CurEntry = SearchList[i];
+
+ if (CurEntry.isNormalDir()) {
+ // If this isn't the first time we've seen this dir, remove it.
+ if (SeenDirs.insert(CurEntry.getDir()))
+ continue;
+ } else if (CurEntry.isFramework()) {
+ // If this isn't the first time we've seen this framework dir, remove it.
+ if (SeenFrameworkDirs.insert(CurEntry.getFrameworkDir()))
+ continue;
+ } else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ // If this isn't the first time we've seen this headermap, remove it.
+ if (SeenHeaderMaps.insert(CurEntry.getHeaderMap()))
+ continue;
+ }
+
+ // If we have a normal #include dir/framework/headermap that is shadowed
+ // later in the chain by a system include location, we actually want to
+ // ignore the user's request and drop the user dir... keeping the system
+ // dir. This is weird, but required to emulate GCC's search path correctly.
+ //
+ // Since dupes of system dirs are rare, just rescan to find the original
+ // that we're nuking instead of using a DenseMap.
+ if (CurEntry.getDirCharacteristic() != SrcMgr::C_User) {
+ // Find the dir that this one duplicates.
+ unsigned FirstDir;
+ for (FirstDir = 0; ; ++FirstDir) {
+ assert(FirstDir != i && "Didn't find dupe?");
+
+ const DirectoryLookup &SearchEntry = SearchList[FirstDir];
+
+ // If these are different lookup types, then they can't be the dupe.
+ if (SearchEntry.getLookupType() != CurEntry.getLookupType())
+ continue;
+
+ bool isSame;
+ if (CurEntry.isNormalDir())
+ isSame = SearchEntry.getDir() == CurEntry.getDir();
+ else if (CurEntry.isFramework())
+ isSame = SearchEntry.getFrameworkDir() == CurEntry.getFrameworkDir();
+ else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ isSame = SearchEntry.getHeaderMap() == CurEntry.getHeaderMap();
+ }
+
+ if (isSame)
+ break;
+ }
+
+ // If the first dir in the search path is a non-system dir, zap it
+ // instead of the system one.
+ if (SearchList[FirstDir].getDirCharacteristic() == SrcMgr::C_User)
+ DirToRemove = FirstDir;
+ }
+
+ if (Verbose) {
+ fprintf(stderr, "ignoring duplicate directory \"%s\"\n",
+ CurEntry.getName());
+ if (DirToRemove != i)
+ fprintf(stderr, " as it is a non-system directory that duplicates"
+ " a system directory\n");
+ }
+
+ // This is reached if the current entry is a duplicate. Remove the
+ // DirToRemove (usually the current dir).
+ SearchList.erase(SearchList.begin()+DirToRemove);
+ --i;
+ }
+}
+
+
+void InitHeaderSearch::Realize() {
+ // Concatenate ANGLE+SYSTEM+AFTER chains together into SearchList.
+ std::vector<DirectoryLookup> SearchList;
+ SearchList = IncludeGroup[Angled];
+ SearchList.insert(SearchList.end(), IncludeGroup[System].begin(),
+ IncludeGroup[System].end());
+ SearchList.insert(SearchList.end(), IncludeGroup[After].begin(),
+ IncludeGroup[After].end());
+ RemoveDuplicates(SearchList, Verbose);
+ RemoveDuplicates(IncludeGroup[Quoted], Verbose);
+
+ // Prepend QUOTED list on the search list.
+ SearchList.insert(SearchList.begin(), IncludeGroup[Quoted].begin(),
+ IncludeGroup[Quoted].end());
+
+
+ bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
+ Headers.SetSearchPaths(SearchList, IncludeGroup[Quoted].size(),
+ DontSearchCurDir);
+
+ // If verbose, print the list of directories that will be searched.
+ if (Verbose) {
+ fprintf(stderr, "#include \"...\" search starts here:\n");
+ unsigned QuotedIdx = IncludeGroup[Quoted].size();
+ for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
+ if (i == QuotedIdx)
+ fprintf(stderr, "#include <...> search starts here:\n");
+ const char *Name = SearchList[i].getName();
+ const char *Suffix;
+ if (SearchList[i].isNormalDir())
+ Suffix = "";
+ else if (SearchList[i].isFramework())
+ Suffix = " (framework directory)";
+ else {
+ assert(SearchList[i].isHeaderMap() && "Unknown DirectoryLookup");
+ Suffix = " (headermap)";
+ }
+ fprintf(stderr, " %s%s\n", Name, Suffix);
+ }
+ fprintf(stderr, "End of search list.\n");
+ }
+}
+
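
A condensed sketch of the intended call order, pieced together from the methods in this file; the constructor arguments and the enum qualification are assumptions.

    // Hypothetical driver use; method names and ordering come from this file.
    InitHeaderSearch Init(HeaderInfo /*, Verbose, isysroot -- assumed ctor params */);
    Init.AddPath(UserIncludeDir, InitHeaderSearch::Angled,   // -I style path
                 /*isCXXAware=*/false, /*isUserSupplied=*/true,
                 /*isFramework=*/false);
    Init.AddDefaultEnvVarPaths(LangOpts);        // CPATH, C_INCLUDE_PATH, ...
    Init.AddDefaultSystemIncludePaths(LangOpts); // the hard-coded system dirs above
    Init.Realize();                              // dedup and hand the list to HeaderSearch
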
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
new file mode 100644
index 0000000..0945037
--- /dev/null
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -0,0 +1,495 @@
+//===--- InitPreprocessor.cpp - PP initialization code. ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::InitializePreprocessor function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/InitPreprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/System/Path.h"
+
+namespace clang {
+
+// Append a #define line to Buf for Macro. Macro should be of the form XXX,
+// in which case we emit "#define XXX 1", or of the form XXX=Y z W, in which
+// case we emit "#define XXX Y z W". To get a #define with no value, use "XXX=".
+static void DefineBuiltinMacro(std::vector<char> &Buf, const char *Macro,
+ const char *Command = "#define ") {
+ Buf.insert(Buf.end(), Command, Command+strlen(Command));
+ if (const char *Equal = strchr(Macro, '=')) {
+ // Turn the = into ' '.
+ Buf.insert(Buf.end(), Macro, Equal);
+ Buf.push_back(' ');
+
+ // Per GCC -D semantics, the macro ends at \n if it exists.
+ const char *End = strpbrk(Equal, "\n\r");
+ if (End) {
+ fprintf(stderr, "warning: macro '%s' contains embedded newline, text "
+ "after the newline is ignored.\n",
+ std::string(Macro, Equal).c_str());
+ } else {
+ End = Equal+strlen(Equal);
+ }
+
+ Buf.insert(Buf.end(), Equal+1, End);
+ } else {
+ // Push "macroname 1".
+ Buf.insert(Buf.end(), Macro, Macro+strlen(Macro));
+ Buf.push_back(' ');
+ Buf.push_back('1');
+ }
+ Buf.push_back('\n');
+}
+
+// Append a #undef line to Buf for Macro. Macro should be of the form XXX
+// and we emit "#undef XXX".
+static void UndefineBuiltinMacro(std::vector<char> &Buf, const char *Macro) {
+ // Push "macroname".
+ const char *Command = "#undef ";
+ Buf.insert(Buf.end(), Command, Command+strlen(Command));
+ Buf.insert(Buf.end(), Macro, Macro+strlen(Macro));
+ Buf.push_back('\n');
+}
+
+/// Add the quoted name of an implicit include file.
+static void AddQuotedIncludePath(std::vector<char> &Buf,
+ const std::string &File) {
+ // Implicit include paths should be resolved relative to the current
+ // working directory first, and then use the regular header search
+ // mechanism. The proper way to handle this is to have the
+ // predefines buffer located at the current working directory, but
+ // it has no file entry. For now, work around this by using an
+ // absolute path if we find the file here, and otherwise letting
+ // header search handle it.
+ llvm::sys::Path Path(File);
+ Path.makeAbsolute();
+ if (!Path.exists())
+ Path = File;
+
+ // Escape double quotes etc.
+ Buf.push_back('"');
+ std::string EscapedFile = Lexer::Stringify(Path.toString());
+ Buf.insert(Buf.end(), EscapedFile.begin(), EscapedFile.end());
+ Buf.push_back('"');
+}
+
+/// AddImplicitInclude - Add an implicit #include of the specified file to the
+/// predefines buffer.
+static void AddImplicitInclude(std::vector<char> &Buf,
+ const std::string &File) {
+ const char *Inc = "#include ";
+ Buf.insert(Buf.end(), Inc, Inc+strlen(Inc));
+ AddQuotedIncludePath(Buf, File);
+ Buf.push_back('\n');
+}
+
+static void AddImplicitIncludeMacros(std::vector<char> &Buf,
+ const std::string &File) {
+ const char *Inc = "#__include_macros ";
+ Buf.insert(Buf.end(), Inc, Inc+strlen(Inc));
+ AddQuotedIncludePath(Buf, File);
+ Buf.push_back('\n');
+ // Marker token to stop the __include_macros fetch loop.
+ const char *Marker = "##\n"; // ##?
+ Buf.insert(Buf.end(), Marker, Marker+strlen(Marker));
+}
+
+/// AddImplicitIncludePTH - Add an implicit #include using the original file
+/// used to generate a PTH cache.
+static void AddImplicitIncludePTH(std::vector<char> &Buf, Preprocessor &PP,
+ const std::string& ImplicitIncludePTH) {
+ PTHManager *P = PP.getPTHManager();
+ assert(P && "No PTHManager.");
+ const char *OriginalFile = P->getOriginalSourceFile();
+
+ if (!OriginalFile) {
+ assert(!ImplicitIncludePTH.empty());
+ fprintf(stderr, "error: PTH file '%s' does not designate an original "
+ "source header file for -include-pth\n",
+ ImplicitIncludePTH.c_str());
+ exit (1);
+ }
+
+ AddImplicitInclude(Buf, OriginalFile);
+}
+
+/// PickFP - This is used to pick a value based on the FP semantics of the
+/// specified FP model.
+template <typename T>
+static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
+ T IEEEDoubleVal, T X87DoubleExtendedVal, T PPCDoubleDoubleVal,
+ T IEEEQuadVal) {
+ if (Sem == &llvm::APFloat::IEEEsingle)
+ return IEEESingleVal;
+ if (Sem == &llvm::APFloat::IEEEdouble)
+ return IEEEDoubleVal;
+ if (Sem == &llvm::APFloat::x87DoubleExtended)
+ return X87DoubleExtendedVal;
+ if (Sem == &llvm::APFloat::PPCDoubleDouble)
+ return PPCDoubleDoubleVal;
+ assert(Sem == &llvm::APFloat::IEEEquad);
+ return IEEEQuadVal;
+}
+
+static void DefineFloatMacros(std::vector<char> &Buf, const char *Prefix,
+ const llvm::fltSemantics *Sem) {
+ const char *DenormMin, *Epsilon, *Max, *Min;
+ DenormMin = PickFP(Sem, "1.40129846e-45F", "4.9406564584124654e-324",
+ "3.64519953188247460253e-4951L",
+ "4.94065645841246544176568792868221e-324L",
+ "6.47517511943802511092443895822764655e-4966L");
+ int Digits = PickFP(Sem, 6, 15, 18, 31, 33);
+ Epsilon = PickFP(Sem, "1.19209290e-7F", "2.2204460492503131e-16",
+ "1.08420217248550443401e-19L",
+ "4.94065645841246544176568792868221e-324L",
+ "1.92592994438723585305597794258492732e-34L");
+ int HasInfinity = 1, HasQuietNaN = 1;
+ int MantissaDigits = PickFP(Sem, 24, 53, 64, 106, 113);
+ int Min10Exp = PickFP(Sem, -37, -307, -4931, -291, -4931);
+ int Max10Exp = PickFP(Sem, 38, 308, 4932, 308, 4932);
+ int MinExp = PickFP(Sem, -125, -1021, -16381, -968, -16381);
+ int MaxExp = PickFP(Sem, 128, 1024, 16384, 1024, 16384);
+ Min = PickFP(Sem, "1.17549435e-38F", "2.2250738585072014e-308",
+ "3.36210314311209350626e-4932L",
+ "2.00416836000897277799610805135016e-292L",
+ "3.36210314311209350626267781732175260e-4932L");
+ Max = PickFP(Sem, "3.40282347e+38F", "1.7976931348623157e+308",
+ "1.18973149535723176502e+4932L",
+ "1.79769313486231580793728971405301e+308L",
+ "1.18973149535723176508575932662800702e+4932L");
+
+ char MacroBuf[100];
+ sprintf(MacroBuf, "__%s_DENORM_MIN__=%s", Prefix, DenormMin);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_DIG__=%d", Prefix, Digits);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_EPSILON__=%s", Prefix, Epsilon);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_HAS_INFINITY__=%d", Prefix, HasInifinity);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_HAS_QUIET_NAN__=%d", Prefix, HasQuietNaN);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MANT_DIG__=%d", Prefix, MantissaDigits);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MAX_10_EXP__=%d", Prefix, Max10Exp);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MAX_EXP__=%d", Prefix, MaxExp);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MAX__=%s", Prefix, Max);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MIN_10_EXP__=(%d)", Prefix, Min10Exp);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MIN_EXP__=(%d)", Prefix, MinExp);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_MIN__=%s", Prefix, Min);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ sprintf(MacroBuf, "__%s_HAS_DENORM__=1", Prefix);
+ DefineBuiltinMacro(Buf, MacroBuf);
+}
+
+
+/// DefineTypeSize - Emit a macro to the predefines buffer that declares a macro
+/// named MacroName with the maximum value for a type of width 'TypeWidth',
+/// signedness 'isSigned', and value suffix 'ValSuffix' (e.g. LL).
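+/// For example, DefineTypeSize("__INT_MAX__", 32, "", true, Buf) emits
+/// "__INT_MAX__=2147483647".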
+static void DefineTypeSize(const char *MacroName, unsigned TypeWidth,
+ const char *ValSuffix, bool isSigned,
+ std::vector<char> &Buf) {
+ char MacroBuf[60];
+ unsigned long long MaxVal;
+ if (isSigned)
+ MaxVal = (1ULL << (TypeWidth - 1)) - 1;
+ else
+ MaxVal = ~0ULL >> (64-TypeWidth);
+
+ sprintf(MacroBuf, "%s=%llu%s", MacroName, MaxVal, ValSuffix);
+ DefineBuiltinMacro(Buf, MacroBuf);
+}
+
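+/// DefineType - Emit a macro mapping MacroName to the target's spelling of the
+/// given integer type, e.g. "__SIZE_TYPE__=long unsigned int" (the exact text
+/// comes from TargetInfo::getTypeName).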
+static void DefineType(const char *MacroName, TargetInfo::IntType Ty,
+ std::vector<char> &Buf) {
+ char MacroBuf[60];
+ sprintf(MacroBuf, "%s=%s", MacroName, TargetInfo::getTypeName(Ty));
+ DefineBuiltinMacro(Buf, MacroBuf);
+}
+
+
+static void InitializePredefinedMacros(const TargetInfo &TI,
+ const LangOptions &LangOpts,
+ std::vector<char> &Buf) {
+ char MacroBuf[60];
+ // Compiler version introspection macros.
+ DefineBuiltinMacro(Buf, "__llvm__=1"); // LLVM Backend
+ DefineBuiltinMacro(Buf, "__clang__=1"); // Clang Frontend
+
+ // Currently claim to be compatible with GCC 4.2.1-5621.
+ DefineBuiltinMacro(Buf, "__APPLE_CC__=5621");
+ DefineBuiltinMacro(Buf, "__GNUC_MINOR__=2");
+ DefineBuiltinMacro(Buf, "__GNUC_PATCHLEVEL__=1");
+ DefineBuiltinMacro(Buf, "__GNUC__=4");
+ DefineBuiltinMacro(Buf, "__GXX_ABI_VERSION=1002");
+ DefineBuiltinMacro(Buf, "__VERSION__=\"4.2.1 Compatible Clang Compiler\"");
+
+
+ // Initialize language-specific preprocessor defines.
+
+ // These should all be defined in the preprocessor according to the
+ // current language configuration.
+ if (!LangOpts.Microsoft)
+ DefineBuiltinMacro(Buf, "__STDC__=1");
+ if (LangOpts.AsmPreprocessor)
+ DefineBuiltinMacro(Buf, "__ASSEMBLER__=1");
+ if (LangOpts.C99 && !LangOpts.CPlusPlus)
+ DefineBuiltinMacro(Buf, "__STDC_VERSION__=199901L");
+ else if (0) // STDC94 ?
+ DefineBuiltinMacro(Buf, "__STDC_VERSION__=199409L");
+
+ // Standard conforming mode?
+ if (!LangOpts.GNUMode)
+ DefineBuiltinMacro(Buf, "__STRICT_ANSI__=1");
+
+ if (LangOpts.CPlusPlus0x)
+ DefineBuiltinMacro(Buf, "__GXX_EXPERIMENTAL_CXX0X__");
+
+ if (LangOpts.Freestanding)
+ DefineBuiltinMacro(Buf, "__STDC_HOSTED__=0");
+ else
+ DefineBuiltinMacro(Buf, "__STDC_HOSTED__=1");
+
+ if (LangOpts.ObjC1) {
+ DefineBuiltinMacro(Buf, "__OBJC__=1");
+ if (LangOpts.ObjCNonFragileABI) {
+ DefineBuiltinMacro(Buf, "__OBJC2__=1");
+ DefineBuiltinMacro(Buf, "OBJC_ZEROCOST_EXCEPTIONS=1");
+ DefineBuiltinMacro(Buf, "__EXCEPTIONS=1");
+ }
+
+ if (LangOpts.getGCMode() != LangOptions::NonGC)
+ DefineBuiltinMacro(Buf, "__OBJC_GC__=1");
+
+ if (LangOpts.NeXTRuntime)
+ DefineBuiltinMacro(Buf, "__NEXT_RUNTIME__=1");
+ }
+
+ // FIXME: darwin_constant_cfstrings controls this, and it probably also
+ // depends on the runtime in use. Note that it is defined even for C code.
+ DefineBuiltinMacro(Buf, "__CONSTANT_CFSTRINGS__=1");
+
+ if (LangOpts.ObjC2)
+ DefineBuiltinMacro(Buf, "OBJC_NEW_PROPERTIES");
+
+ if (LangOpts.ObjCSenderDispatch)
+ DefineBuiltinMacro(Buf, "__OBJC_SENDER_AWARE_DISPATCH__");
+
+ if (LangOpts.PascalStrings)
+ DefineBuiltinMacro(Buf, "__PASCAL_STRINGS__");
+
+ if (LangOpts.Blocks) {
+ DefineBuiltinMacro(Buf, "__block=__attribute__((__blocks__(byref)))");
+ DefineBuiltinMacro(Buf, "__BLOCKS__=1");
+ }
+
+ if (LangOpts.CPlusPlus) {
+ DefineBuiltinMacro(Buf, "__DEPRECATED=1");
+ DefineBuiltinMacro(Buf, "__EXCEPTIONS=1");
+ DefineBuiltinMacro(Buf, "__GNUG__=4");
+ DefineBuiltinMacro(Buf, "__GXX_WEAK__=1");
+ DefineBuiltinMacro(Buf, "__cplusplus=1");
+ DefineBuiltinMacro(Buf, "__private_extern__=extern");
+ }
+
+ // Filter out some microsoft extensions when trying to parse in ms-compat
+ // mode.
+ if (LangOpts.Microsoft) {
+ DefineBuiltinMacro(Buf, "_cdecl=__cdecl");
+ DefineBuiltinMacro(Buf, "__int8=__INT8_TYPE__");
+ DefineBuiltinMacro(Buf, "__int16=__INT16_TYPE__");
+ DefineBuiltinMacro(Buf, "__int32=__INT32_TYPE__");
+ DefineBuiltinMacro(Buf, "__int64=__INT64_TYPE__");
+ }
+
+ if (LangOpts.Optimize)
+ DefineBuiltinMacro(Buf, "__OPTIMIZE__=1");
+ if (LangOpts.OptimizeSize)
+ DefineBuiltinMacro(Buf, "__OPTIMIZE_SIZE__=1");
+
+ // Initialize target-specific preprocessor defines.
+
+ // Define type sizing macros based on the target properties.
+ assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
+ DefineBuiltinMacro(Buf, "__CHAR_BIT__=8");
+
+ unsigned IntMaxWidth;
+ const char *IntMaxSuffix;
+ if (TI.getIntMaxType() == TargetInfo::SignedLongLong) {
+ IntMaxWidth = TI.getLongLongWidth();
+ IntMaxSuffix = "LL";
+ } else if (TI.getIntMaxType() == TargetInfo::SignedLong) {
+ IntMaxWidth = TI.getLongWidth();
+ IntMaxSuffix = "L";
+ } else {
+ assert(TI.getIntMaxType() == TargetInfo::SignedInt);
+ IntMaxWidth = TI.getIntWidth();
+ IntMaxSuffix = "";
+ }
+
+ DefineTypeSize("__SCHAR_MAX__", TI.getCharWidth(), "", true, Buf);
+ DefineTypeSize("__SHRT_MAX__", TI.getShortWidth(), "", true, Buf);
+ DefineTypeSize("__INT_MAX__", TI.getIntWidth(), "", true, Buf);
+ DefineTypeSize("__LONG_MAX__", TI.getLongWidth(), "L", true, Buf);
+ DefineTypeSize("__LONG_LONG_MAX__", TI.getLongLongWidth(), "LL", true, Buf);
+ DefineTypeSize("__WCHAR_MAX__", TI.getWCharWidth(), "", true, Buf);
+ DefineTypeSize("__INTMAX_MAX__", IntMaxWidth, IntMaxSuffix, true, Buf);
+
+ DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Buf);
+ DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Buf);
+ DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Buf);
+ DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Buf);
+ DefineType("__SIZE_TYPE__", TI.getSizeType(), Buf);
+ DefineType("__WCHAR_TYPE__", TI.getWCharType(), Buf);
+ // FIXME: Add a TargetInfo hook for __WINT_TYPE__.
+ DefineBuiltinMacro(Buf, "__WINT_TYPE__=int");
+
+ DefineFloatMacros(Buf, "FLT", &TI.getFloatFormat());
+ DefineFloatMacros(Buf, "DBL", &TI.getDoubleFormat());
+ DefineFloatMacros(Buf, "LDBL", &TI.getLongDoubleFormat());
+
+ // Define a __POINTER_WIDTH__ macro for stdint.h.
+ sprintf(MacroBuf, "__POINTER_WIDTH__=%d", (int)TI.getPointerWidth(0));
+ DefineBuiltinMacro(Buf, MacroBuf);
+
+ if (!TI.isCharSigned())
+ DefineBuiltinMacro(Buf, "__CHAR_UNSIGNED__");
+
+ // Define fixed-sized integer types for stdint.h
+ assert(TI.getCharWidth() == 8 && "unsupported target types");
+ assert(TI.getShortWidth() == 16 && "unsupported target types");
+ DefineBuiltinMacro(Buf, "__INT8_TYPE__=char");
+ DefineBuiltinMacro(Buf, "__INT16_TYPE__=short");
+
+ if (TI.getIntWidth() == 32)
+ DefineBuiltinMacro(Buf, "__INT32_TYPE__=int");
+ else {
+ assert(TI.getLongLongWidth() == 32 && "unsupported target types");
+ DefineBuiltinMacro(Buf, "__INT32_TYPE__=long long");
+ }
+
+ // 16-bit targets don't necessarily have a 64-bit type.
+ if (TI.getLongLongWidth() == 64)
+ DefineBuiltinMacro(Buf, "__INT64_TYPE__=long long");
+
+ // Add __builtin_va_list typedef.
+ {
+ const char *VAList = TI.getVAListDeclaration();
+ Buf.insert(Buf.end(), VAList, VAList+strlen(VAList));
+ Buf.push_back('\n');
+ }
+
+ if (const char *Prefix = TI.getUserLabelPrefix()) {
+ sprintf(MacroBuf, "__USER_LABEL_PREFIX__=%s", Prefix);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ }
+
+ // Build configuration options. FIXME: these should be controlled by
+ // command line options or something.
+ DefineBuiltinMacro(Buf, "__FINITE_MATH_ONLY__=0");
+
+ if (LangOpts.Static)
+ DefineBuiltinMacro(Buf, "__STATIC__=1");
+ else
+ DefineBuiltinMacro(Buf, "__DYNAMIC__=1");
+
+ if (LangOpts.GNUInline)
+ DefineBuiltinMacro(Buf, "__GNUC_GNU_INLINE__=1");
+ else
+ DefineBuiltinMacro(Buf, "__GNUC_STDC_INLINE__=1");
+
+ if (LangOpts.NoInline)
+ DefineBuiltinMacro(Buf, "__NO_INLINE__=1");
+
+ if (unsigned PICLevel = LangOpts.PICLevel) {
+ sprintf(MacroBuf, "__PIC__=%d", PICLevel);
+ DefineBuiltinMacro(Buf, MacroBuf);
+
+ sprintf(MacroBuf, "__pic__=%d", PICLevel);
+ DefineBuiltinMacro(Buf, MacroBuf);
+ }
+
+ // Macros to control C99 numerics and <float.h>
+ DefineBuiltinMacro(Buf, "__FLT_EVAL_METHOD__=0");
+ DefineBuiltinMacro(Buf, "__FLT_RADIX__=2");
+ sprintf(MacroBuf, "__DECIMAL_DIG__=%d",
+ PickFP(&TI.getLongDoubleFormat(), -1/*FIXME*/, 17, 21, 33, 36));
+ DefineBuiltinMacro(Buf, MacroBuf);
+
+ // Get other target #defines.
+ TI.getTargetDefines(LangOpts, Buf);
+}
+
+/// InitializePreprocessor - Initialize the preprocessor, getting it and the
+/// environment ready to process a single file. Returns true on error.
+///
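+/// The resulting predefines buffer looks roughly like this (assuming
+/// DefineBuiltinMacro emits "#define NAME VALUE" lines):
+///
+///   # 1 "<built-in>" 3
+///   #define __llvm__ 1
+///   ...
+///   # 1 "<command line>" 1
+///   (any -D/-U, -imacros and -include entries)
+///   # 2 "<built-in>" 2 3
+///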
+bool InitializePreprocessor(Preprocessor &PP,
+ const PreprocessorInitOptions& InitOpts) {
+ std::vector<char> PredefineBuffer;
+
+ const char *LineDirective = "# 1 \"<built-in>\" 3\n";
+ PredefineBuffer.insert(PredefineBuffer.end(),
+ LineDirective, LineDirective+strlen(LineDirective));
+
+ // Install things like __POWERPC__, __GNUC__, etc into the macro table.
+ InitializePredefinedMacros(PP.getTargetInfo(), PP.getLangOptions(),
+ PredefineBuffer);
+
+ // Add on the predefines from the driver. Wrap in a #line directive to report
+ // that they come from the command line.
+ LineDirective = "# 1 \"<command line>\" 1\n";
+ PredefineBuffer.insert(PredefineBuffer.end(),
+ LineDirective, LineDirective+strlen(LineDirective));
+
+ // Process #define's and #undef's in the order they are given.
+ for (PreprocessorInitOptions::macro_iterator I = InitOpts.macro_begin(),
+ E = InitOpts.macro_end(); I != E; ++I) {
+ if (I->second) // isUndef
+ UndefineBuiltinMacro(PredefineBuffer, I->first.c_str());
+ else
+ DefineBuiltinMacro(PredefineBuffer, I->first.c_str());
+ }
+
+ // If -imacros are specified, include them now. These are processed before
+ // any -include directives.
+ for (PreprocessorInitOptions::imacro_iterator I = InitOpts.imacro_begin(),
+ E = InitOpts.imacro_end(); I != E; ++I)
+ AddImplicitIncludeMacros(PredefineBuffer, *I);
+
+ // Process -include directives.
+ for (PreprocessorInitOptions::include_iterator I = InitOpts.include_begin(),
+ E = InitOpts.include_end(); I != E; ++I) {
+ if (I->second) // isPTH
+ AddImplicitIncludePTH(PredefineBuffer, PP, I->first);
+ else
+ AddImplicitInclude(PredefineBuffer, I->first);
+ }
+
+ LineDirective = "# 2 \"<built-in>\" 2 3\n";
+ PredefineBuffer.insert(PredefineBuffer.end(),
+ LineDirective, LineDirective+strlen(LineDirective));
+
+ // Null terminate PredefineBuffer and install it as the predefines.
+ PredefineBuffer.push_back(0);
+ PP.setPredefines(&PredefineBuffer[0]);
+
+ // Once we've read this, we're done.
+ return false;
+}
+
+} // namespace clang
diff --git a/lib/Frontend/Makefile b/lib/Frontend/Makefile
new file mode 100644
index 0000000..8d70847
--- /dev/null
+++ b/lib/Frontend/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/Frontend/Makefile -------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangFrontend
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Frontend/ManagerRegistry.cpp b/lib/Frontend/ManagerRegistry.cpp
new file mode 100644
index 0000000..79f1e81
--- /dev/null
+++ b/lib/Frontend/ManagerRegistry.cpp
@@ -0,0 +1,20 @@
+//===- ManagerRegistry.cpp - Pluggable Analyzer module creators -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the pluggable analyzer module creators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ManagerRegistry.h"
+
+using namespace clang;
+
+StoreManagerCreator ManagerRegistry::StoreMgrCreator = 0;
+
+ConstraintManagerCreator ManagerRegistry::ConstraintMgrCreator = 0;
diff --git a/lib/Frontend/PCHReader.cpp b/lib/Frontend/PCHReader.cpp
new file mode 100644
index 0000000..63e4337
--- /dev/null
+++ b/lib/Frontend/PCHReader.cpp
@@ -0,0 +1,2260 @@
+//===--- PCHReader.cpp - Precompiled Headers Reader -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PCHReader class, which reads a precompiled header.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHReader.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "../Sema/Sema.h" // FIXME: move Sema headers elsewhere
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <algorithm>
+#include <iterator>
+#include <cstdio>
+#include <sys/stat.h>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// PCH reader implementation
+//===----------------------------------------------------------------------===//
+
+PCHReader::PCHReader(Preprocessor &PP, ASTContext *Context)
+ : SemaObj(0), PP(PP), Context(Context), Consumer(0),
+ IdentifierTableData(0), IdentifierLookupTable(0),
+ IdentifierOffsets(0),
+ MethodPoolLookupTable(0), MethodPoolLookupTableData(0),
+ TotalSelectorsInMethodPool(0), SelectorOffsets(0),
+ TotalNumSelectors(0), NumStatHits(0), NumStatMisses(0),
+ NumSLocEntriesRead(0), NumStatementsRead(0),
+ NumMacrosRead(0), NumMethodPoolSelectorsRead(0), NumMethodPoolMisses(0),
+ NumLexicalDeclContextsRead(0), NumVisibleDeclContextsRead(0) { }
+
+PCHReader::~PCHReader() {}
+
+Expr *PCHReader::ReadDeclExpr() {
+ return dyn_cast_or_null<Expr>(ReadStmt(DeclsCursor));
+}
+
+Expr *PCHReader::ReadTypeExpr() {
+ return dyn_cast_or_null<Expr>(ReadStmt(Stream));
+}
+
+
+namespace {
+class VISIBILITY_HIDDEN PCHMethodPoolLookupTrait {
+ PCHReader &Reader;
+
+public:
+ typedef std::pair<ObjCMethodList, ObjCMethodList> data_type;
+
+ typedef Selector external_key_type;
+ typedef external_key_type internal_key_type;
+
+ explicit PCHMethodPoolLookupTrait(PCHReader &Reader) : Reader(Reader) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return a == b;
+ }
+
+ static unsigned ComputeHash(Selector Sel) {
+ unsigned N = Sel.getNumArgs();
+ if (N == 0)
+ ++N;
+ unsigned R = 5381;
+ for (unsigned I = 0; I != N; ++I)
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
+ R = clang::BernsteinHashPartial(II->getName(), II->getLength(), R);
+ return R;
+ }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ using namespace clang::io;
+ unsigned KeyLen = ReadUnalignedLE16(d);
+ unsigned DataLen = ReadUnalignedLE16(d);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ internal_key_type ReadKey(const unsigned char* d, unsigned) {
+ using namespace clang::io;
+ SelectorTable &SelTable = Reader.getContext()->Selectors;
+ unsigned N = ReadUnalignedLE16(d);
+ IdentifierInfo *FirstII
+ = Reader.DecodeIdentifierInfo(ReadUnalignedLE32(d));
+ if (N == 0)
+ return SelTable.getNullarySelector(FirstII);
+ else if (N == 1)
+ return SelTable.getUnarySelector(FirstII);
+
+ llvm::SmallVector<IdentifierInfo *, 16> Args;
+ Args.push_back(FirstII);
+ for (unsigned I = 1; I != N; ++I)
+ Args.push_back(Reader.DecodeIdentifierInfo(ReadUnalignedLE32(d)));
+
+ return SelTable.getSelector(N, Args.data());
+ }
+
+ data_type ReadData(Selector, const unsigned char* d, unsigned DataLen) {
+ using namespace clang::io;
+ unsigned NumInstanceMethods = ReadUnalignedLE16(d);
+ unsigned NumFactoryMethods = ReadUnalignedLE16(d);
+
+ data_type Result;
+
+ // Load instance methods
+ ObjCMethodList *Prev = 0;
+ for (unsigned I = 0; I != NumInstanceMethods; ++I) {
+ ObjCMethodDecl *Method
+ = cast<ObjCMethodDecl>(Reader.GetDecl(ReadUnalignedLE32(d)));
+ if (!Result.first.Method) {
+ // This is the first method, which is the easy case.
+ Result.first.Method = Method;
+ Prev = &Result.first;
+ continue;
+ }
+
+ Prev->Next = new ObjCMethodList(Method, 0);
+ Prev = Prev->Next;
+ }
+
+ // Load factory methods
+ Prev = 0;
+ for (unsigned I = 0; I != NumFactoryMethods; ++I) {
+ ObjCMethodDecl *Method
+ = cast<ObjCMethodDecl>(Reader.GetDecl(ReadUnalignedLE32(d)));
+ if (!Result.second.Method) {
+ // This is the first method, which is the easy case.
+ Result.second.Method = Method;
+ Prev = &Result.second;
+ continue;
+ }
+
+ Prev->Next = new ObjCMethodList(Method, 0);
+ Prev = Prev->Next;
+ }
+
+ return Result;
+ }
+};
+
+} // end anonymous namespace
+
+/// \brief The on-disk hash table used for the global method pool.
+typedef OnDiskChainedHashTable<PCHMethodPoolLookupTrait>
+ PCHMethodPoolLookupTable;
+
+namespace {
+class VISIBILITY_HIDDEN PCHIdentifierLookupTrait {
+ PCHReader &Reader;
+
+ // If we know the IdentifierInfo in advance, it is here and we will
+ // not build a new one. Used when deserializing information about an
+ // identifier that was constructed before the PCH file was read.
+ IdentifierInfo *KnownII;
+
+public:
+ typedef IdentifierInfo * data_type;
+
+ typedef const std::pair<const char*, unsigned> external_key_type;
+
+ typedef external_key_type internal_key_type;
+
+ explicit PCHIdentifierLookupTrait(PCHReader &Reader, IdentifierInfo *II = 0)
+ : Reader(Reader), KnownII(II) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
+ : false;
+ }
+
+ static unsigned ComputeHash(const internal_key_type& a) {
+ return BernsteinHash(a.first, a.second);
+ }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ using namespace clang::io;
+ unsigned DataLen = ReadUnalignedLE16(d);
+ unsigned KeyLen = ReadUnalignedLE16(d);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ static std::pair<const char*, unsigned>
+ ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return std::make_pair((const char*) d, n-1);
+ }
+
+ IdentifierInfo *ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen) {
+ using namespace clang::io;
+ pch::IdentID ID = ReadUnalignedLE32(d);
+ bool IsInteresting = ID & 0x01;
+
+ // Wipe out the "is interesting" bit.
+ ID = ID >> 1;
+
+ if (!IsInteresting) {
+ // For uninteresting identifiers, just build the IdentifierInfo
+ // and associate it with the persistent ID.
+ IdentifierInfo *II = KnownII;
+ if (!II)
+ II = &Reader.getIdentifierTable().CreateIdentifierInfo(
+ k.first, k.first + k.second);
+ Reader.SetIdentifierInfo(ID, II);
+ return II;
+ }
+
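+ // The remaining 16 bits pack, from least- to most-significant bit: the C++
+ // operator keyword flag, the poisoned flag, the extension-token flag, the
+ // has-macro-definition flag, and a 10-bit ObjC-or-builtin ID.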
+ unsigned Bits = ReadUnalignedLE16(d);
+ bool CPlusPlusOperatorKeyword = Bits & 0x01;
+ Bits >>= 1;
+ bool Poisoned = Bits & 0x01;
+ Bits >>= 1;
+ bool ExtensionToken = Bits & 0x01;
+ Bits >>= 1;
+ bool hasMacroDefinition = Bits & 0x01;
+ Bits >>= 1;
+ unsigned ObjCOrBuiltinID = Bits & 0x3FF;
+ Bits >>= 10;
+
+ assert(Bits == 0 && "Extra bits in the identifier?");
+ DataLen -= 6;
+
+ // Build the IdentifierInfo itself and link the identifier ID with
+ // the new IdentifierInfo.
+ IdentifierInfo *II = KnownII;
+ if (!II)
+ II = &Reader.getIdentifierTable().CreateIdentifierInfo(
+ k.first, k.first + k.second);
+ Reader.SetIdentifierInfo(ID, II);
+
+ // Set or check the various bits in the IdentifierInfo structure.
+ // FIXME: Load token IDs lazily, too?
+ II->setObjCOrBuiltinID(ObjCOrBuiltinID);
+ assert(II->isExtensionToken() == ExtensionToken &&
+ "Incorrect extension token flag");
+ (void)ExtensionToken;
+ II->setIsPoisoned(Poisoned);
+ assert(II->isCPlusPlusOperatorKeyword() == CPlusPlusOperatorKeyword &&
+ "Incorrect C++ operator keyword flag");
+ (void)CPlusPlusOperatorKeyword;
+
+ // If this identifier is a macro, deserialize the macro
+ // definition.
+ if (hasMacroDefinition) {
+ uint32_t Offset = ReadUnalignedLE32(d);
+ Reader.ReadMacroRecord(Offset);
+ DataLen -= 4;
+ }
+
+ // Read all of the declarations visible at global scope with this
+ // name.
+ Sema *SemaObj = Reader.getSema();
+ if (Reader.getContext() == 0) return II;
+
+ while (DataLen > 0) {
+ NamedDecl *D = cast<NamedDecl>(Reader.GetDecl(ReadUnalignedLE32(d)));
+ if (SemaObj) {
+ // Introduce this declaration into the translation-unit scope
+ // and add it to the declaration chain for this identifier, so
+ // that (unqualified) name lookup will find it.
+ SemaObj->TUScope->AddDecl(Action::DeclPtrTy::make(D));
+ SemaObj->IdResolver.AddDeclToIdentifierChain(II, D);
+ } else {
+ // Queue this declaration so that it will be added to the
+ // translation unit scope and identifier's declaration chain
+ // once a Sema object is known.
+ Reader.PreloadedDecls.push_back(D);
+ }
+
+ DataLen -= 4;
+ }
+ return II;
+ }
+};
+
+} // end anonymous namespace
+
+/// \brief The on-disk hash table used to contain information about
+/// all of the identifiers in the program.
+typedef OnDiskChainedHashTable<PCHIdentifierLookupTrait>
+ PCHIdentifierLookupTable;
+
+// FIXME: use the diagnostics machinery
+bool PCHReader::Error(const char *Msg) {
+ Diagnostic &Diags = PP.getDiagnostics();
+ unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Fatal, Msg);
+ Diag(DiagID);
+ return true;
+}
+
+/// \brief Split the given string into a vector of lines, eliminating
+/// any empty lines unless \p KeepEmptyLines is set.
+///
+/// \param Str the string to split.
+/// \param Len the length of Str.
+/// \param KeepEmptyLines true if empty lines should be included
+/// \returns a vector of lines, with the line endings removed
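+/// For example, splitLines("a\n\nb\n", 5) yields {"a", "b"}; with
+/// KeepEmptyLines set it yields {"a", "", "b"}.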
+std::vector<std::string> splitLines(const char *Str, unsigned Len,
+ bool KeepEmptyLines = false) {
+ std::vector<std::string> Lines;
+ for (unsigned LineStart = 0; LineStart < Len; ++LineStart) {
+ unsigned LineEnd = LineStart;
+ while (LineEnd < Len && Str[LineEnd] != '\n')
+ ++LineEnd;
+ if (LineStart != LineEnd || KeepEmptyLines)
+ Lines.push_back(std::string(&Str[LineStart], &Str[LineEnd]));
+ LineStart = LineEnd;
+ }
+ return Lines;
+}
+
+/// \brief Determine whether the string Haystack starts with the
+/// substring Needle.
+static bool startsWith(const std::string &Haystack, const char *Needle) {
+ for (unsigned I = 0, N = Haystack.size(); Needle[I] != 0; ++I) {
+ if (I == N)
+ return false;
+ if (Haystack[I] != Needle[I])
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Determine whether the string Haystack starts with the
+/// substring Needle.
+static inline bool startsWith(const std::string &Haystack,
+ const std::string &Needle) {
+ return startsWith(Haystack, Needle.c_str());
+}
+
+/// \brief Check the contents of the predefines buffer against the
+/// contents of the predefines buffer used to build the PCH file.
+///
+/// The contents of the two predefines buffers should be the same. If
+/// not, then some command-line option changed the preprocessor state
+/// and we must reject the PCH file.
+///
+/// \param PCHPredef The start of the predefines buffer in the PCH
+/// file.
+///
+/// \param PCHPredefLen The length of the predefines buffer in the PCH
+/// file.
+///
+/// \param PCHBufferID The FileID for the PCH predefines buffer.
+///
+/// \returns true if there was a mismatch (in which case the PCH file
+/// should be ignored), or false otherwise.
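+///
+/// For example, if the PCH was built with -DDEBUG=1 but the current command
+/// line uses -DDEBUG=2, the definitions conflict and the PCH is rejected; an
+/// extra -DFOO is tolerated as long as FOO was never used inside the PCH.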
+bool PCHReader::CheckPredefinesBuffer(const char *PCHPredef,
+ unsigned PCHPredefLen,
+ FileID PCHBufferID) {
+ const char *Predef = PP.getPredefines().c_str();
+ unsigned PredefLen = PP.getPredefines().size();
+
+ // If the two predefines buffers compare equal, we're done!
+ if (PredefLen == PCHPredefLen &&
+ strncmp(Predef, PCHPredef, PCHPredefLen) == 0)
+ return false;
+
+ SourceManager &SourceMgr = PP.getSourceManager();
+
+ // The predefines buffers are different. Determine what the
+ // differences are, and whether they require us to reject the PCH
+ // file.
+ std::vector<std::string> CmdLineLines = splitLines(Predef, PredefLen);
+ std::vector<std::string> PCHLines = splitLines(PCHPredef, PCHPredefLen);
+
+ // Sort both sets of predefines buffer lines, since the set operations
+ // below require sorted input.
+ std::sort(CmdLineLines.begin(), CmdLineLines.end());
+ std::sort(PCHLines.begin(), PCHLines.end());
+
+ // Determine which predefines that were used to build the PCH file
+ // are missing from the command line.
+ std::vector<std::string> MissingPredefines;
+ std::set_difference(PCHLines.begin(), PCHLines.end(),
+ CmdLineLines.begin(), CmdLineLines.end(),
+ std::back_inserter(MissingPredefines));
+
+ bool MissingDefines = false;
+ bool ConflictingDefines = false;
+ for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) {
+ const std::string &Missing = MissingPredefines[I];
+ if (!startsWith(Missing, "#define ")) {
+ Diag(diag::warn_pch_compiler_options_mismatch);
+ return true;
+ }
+
+ // This is a macro definition. Determine the name of the macro
+ // we're defining.
+ std::string::size_type StartOfMacroName = strlen("#define ");
+ std::string::size_type EndOfMacroName
+ = Missing.find_first_of("( \n\r", StartOfMacroName);
+ assert(EndOfMacroName != std::string::npos &&
+ "Couldn't find the end of the macro name");
+ std::string MacroName = Missing.substr(StartOfMacroName,
+ EndOfMacroName - StartOfMacroName);
+
+ // Determine whether this macro was given a different definition
+ // on the command line.
+ std::string MacroDefStart = "#define " + MacroName;
+ std::string::size_type MacroDefLen = MacroDefStart.size();
+ std::vector<std::string>::iterator ConflictPos
+ = std::lower_bound(CmdLineLines.begin(), CmdLineLines.end(),
+ MacroDefStart);
+ for (; ConflictPos != CmdLineLines.end(); ++ConflictPos) {
+ if (!startsWith(*ConflictPos, MacroDefStart)) {
+ // Different macro; we're done.
+ ConflictPos = CmdLineLines.end();
+ break;
+ }
+
+ assert(ConflictPos->size() > MacroDefLen &&
+ "Invalid #define in predefines buffer?");
+ if ((*ConflictPos)[MacroDefLen] != ' ' &&
+ (*ConflictPos)[MacroDefLen] != '(')
+ continue; // Longer macro name; keep trying.
+
+ // We found a conflicting macro definition.
+ break;
+ }
+
+ if (ConflictPos != CmdLineLines.end()) {
+ Diag(diag::warn_cmdline_conflicting_macro_def)
+ << MacroName;
+
+ // Show the definition of this macro within the PCH file.
+ const char *MissingDef = strstr(PCHPredef, Missing.c_str());
+ unsigned Offset = MissingDef - PCHPredef;
+ SourceLocation PCHMissingLoc
+ = SourceMgr.getLocForStartOfFile(PCHBufferID)
+ .getFileLocWithOffset(Offset);
+ Diag(PCHMissingLoc, diag::note_pch_macro_defined_as)
+ << MacroName;
+
+ ConflictingDefines = true;
+ continue;
+ }
+
+ // If the macro doesn't conflict, then we'll just pick up the
+ // macro definition from the PCH file. Warn the user that they
+ // made a mistake.
+ if (ConflictingDefines)
+ continue; // Don't complain if there are already conflicting defs
+
+ if (!MissingDefines) {
+ Diag(diag::warn_cmdline_missing_macro_defs);
+ MissingDefines = true;
+ }
+
+ // Show the definition of this macro within the PCH file.
+ const char *MissingDef = strstr(PCHPredef, Missing.c_str());
+ unsigned Offset = MissingDef - PCHPredef;
+ SourceLocation PCHMissingLoc
+ = SourceMgr.getLocForStartOfFile(PCHBufferID)
+ .getFileLocWithOffset(Offset);
+ Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch);
+ }
+
+ if (ConflictingDefines)
+ return true;
+
+ // Determine what predefines were introduced based on command-line
+ // parameters that were not present when building the PCH
+ // file. Extra #defines are okay, so long as the identifiers being
+ // defined were not used within the precompiled header.
+ std::vector<std::string> ExtraPredefines;
+ std::set_difference(CmdLineLines.begin(), CmdLineLines.end(),
+ PCHLines.begin(), PCHLines.end(),
+ std::back_inserter(ExtraPredefines));
+ for (unsigned I = 0, N = ExtraPredefines.size(); I != N; ++I) {
+ const std::string &Extra = ExtraPredefines[I];
+ if (!startsWith(Extra, "#define ")) {
+ Diag(diag::warn_pch_compiler_options_mismatch);
+ return true;
+ }
+
+ // This is an extra macro definition. Determine the name of the
+ // macro we're defining.
+ std::string::size_type StartOfMacroName = strlen("#define ");
+ std::string::size_type EndOfMacroName
+ = Extra.find_first_of("( \n\r", StartOfMacroName);
+ assert(EndOfMacroName != std::string::npos &&
+ "Couldn't find the end of the macro name");
+ std::string MacroName = Extra.substr(StartOfMacroName,
+ EndOfMacroName - StartOfMacroName);
+
+ // Check whether this name was used somewhere in the PCH file. If
+ // so, defining it as a macro could change behavior, so we reject
+ // the PCH file.
+ if (IdentifierInfo *II = get(MacroName.c_str(),
+ MacroName.c_str() + MacroName.size())) {
+ Diag(diag::warn_macro_name_used_in_pch)
+ << II;
+ return true;
+ }
+
+ // Add this definition to the suggested predefines buffer.
+ SuggestedPredefines += Extra;
+ SuggestedPredefines += '\n';
+ }
+
+ // If we get here, it's because the predefines buffer had compatible
+ // contents. Accept the PCH file.
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Source Manager Deserialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Read the line table in the source manager block.
+/// \returns true if there was an error.
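+/// The record holds a file-name count, then (length, characters) pairs for
+/// each name, followed by per-file groups of line entries.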
+static bool ParseLineTable(SourceManager &SourceMgr,
+ llvm::SmallVectorImpl<uint64_t> &Record) {
+ unsigned Idx = 0;
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ // Parse the file names
+ std::map<int, int> FileIDs;
+ for (int I = 0, N = Record[Idx++]; I != N; ++I) {
+ // Extract the file name
+ unsigned FilenameLen = Record[Idx++];
+ std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
+ Idx += FilenameLen;
+ FileIDs[I] = LineTable.getLineTableFilenameID(Filename.c_str(),
+ Filename.size());
+ }
+
+ // Parse the line entries
+ std::vector<LineEntry> Entries;
+ while (Idx < Record.size()) {
+ int FID = FileIDs[Record[Idx++]];
+
+ // Extract the line entries
+ unsigned NumEntries = Record[Idx++];
+ Entries.clear();
+ Entries.reserve(NumEntries);
+ for (unsigned I = 0; I != NumEntries; ++I) {
+ unsigned FileOffset = Record[Idx++];
+ unsigned LineNo = Record[Idx++];
+ int FilenameID = Record[Idx++];
+ SrcMgr::CharacteristicKind FileKind
+ = (SrcMgr::CharacteristicKind)Record[Idx++];
+ unsigned IncludeOffset = Record[Idx++];
+ Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
+ FileKind, IncludeOffset));
+ }
+ LineTable.AddEntry(FID, Entries);
+ }
+
+ return false;
+}
+
+namespace {
+
+class VISIBILITY_HIDDEN PCHStatData {
+public:
+ const bool hasStat;
+ const ino_t ino;
+ const dev_t dev;
+ const mode_t mode;
+ const time_t mtime;
+ const off_t size;
+
+ PCHStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
+ : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
+
+ PCHStatData()
+ : hasStat(false), ino(0), dev(0), mode(0), mtime(0), size(0) {}
+};
+
+class VISIBILITY_HIDDEN PCHStatLookupTrait {
+ public:
+ typedef const char *external_key_type;
+ typedef const char *internal_key_type;
+
+ typedef PCHStatData data_type;
+
+ static unsigned ComputeHash(const char *path) {
+ return BernsteinHash(path);
+ }
+
+ static internal_key_type GetInternalKey(const char *path) { return path; }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ return strcmp(a, b) == 0;
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
+ unsigned DataLen = (unsigned) *d++;
+ return std::make_pair(KeyLen + 1, DataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char *d, unsigned) {
+ return (const char *)d;
+ }
+
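+ // The on-disk record starts with a one-byte flag that is 1 when no stat data
+ // was recorded (the stat failed), followed by the inode, device, mode, mtime
+ // and size fields read below.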
+ static data_type ReadData(const internal_key_type, const unsigned char *d,
+ unsigned /*DataLen*/) {
+ using namespace clang::io;
+
+ if (*d++ == 1)
+ return data_type();
+
+ ino_t ino = (ino_t) ReadUnalignedLE32(d);
+ dev_t dev = (dev_t) ReadUnalignedLE32(d);
+ mode_t mode = (mode_t) ReadUnalignedLE16(d);
+ time_t mtime = (time_t) ReadUnalignedLE64(d);
+ off_t size = (off_t) ReadUnalignedLE64(d);
+ return data_type(ino, dev, mode, mtime, size);
+ }
+};
+
+/// \brief stat() cache for precompiled headers.
+///
+/// This cache is very similar to the stat cache used by pretokenized
+/// headers.
+class VISIBILITY_HIDDEN PCHStatCache : public StatSysCallCache {
+ typedef OnDiskChainedHashTable<PCHStatLookupTrait> CacheTy;
+ CacheTy *Cache;
+
+ unsigned &NumStatHits, &NumStatMisses;
+public:
+ PCHStatCache(const unsigned char *Buckets,
+ const unsigned char *Base,
+ unsigned &NumStatHits,
+ unsigned &NumStatMisses)
+ : Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) {
+ Cache = CacheTy::Create(Buckets, Base);
+ }
+
+ ~PCHStatCache() { delete Cache; }
+
+ int stat(const char *path, struct stat *buf) {
+ // Do the lookup for the file's data in the PCH file.
+ CacheTy::iterator I = Cache->find(path);
+
+ // If we don't get a hit in the PCH file just forward to 'stat'.
+ if (I == Cache->end()) {
+ ++NumStatMisses;
+ return ::stat(path, buf);
+ }
+
+ ++NumStatHits;
+ PCHStatData Data = *I;
+
+ if (!Data.hasStat)
+ return 1;
+
+ buf->st_ino = Data.ino;
+ buf->st_dev = Data.dev;
+ buf->st_mtime = Data.mtime;
+ buf->st_mode = Data.mode;
+ buf->st_size = Data.size;
+ return 0;
+ }
+};
+} // end anonymous namespace
+
+
+/// \brief Read the source manager block
+PCHReader::PCHReadResult PCHReader::ReadSourceManagerBlock() {
+ using namespace SrcMgr;
+
+ // Set the source-location entry cursor to the current position in
+ // the stream. This cursor will be used to read the contents of the
+ // source manager block initially, and then lazily read
+ // source-location entries as needed.
+ SLocEntryCursor = Stream;
+
+ // The stream itself is going to skip over the source manager block.
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+
+ // Enter the source manager block.
+ if (SLocEntryCursor.EnterSubBlock(pch::SOURCE_MANAGER_BLOCK_ID)) {
+ Error("malformed source manager block record in PCH file");
+ return Failure;
+ }
+
+ SourceManager &SourceMgr = PP.getSourceManager();
+ RecordData Record;
+ unsigned NumHeaderInfos = 0;
+ while (true) {
+ unsigned Code = SLocEntryCursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (SLocEntryCursor.ReadBlockEnd()) {
+ Error("error at end of Source Manager block in PCH file");
+ return Failure;
+ }
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ SLocEntryCursor.ReadSubBlockID();
+ if (SLocEntryCursor.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ SLocEntryCursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ const char *BlobStart;
+ unsigned BlobLen;
+ Record.clear();
+ switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case pch::SM_LINE_TABLE:
+ if (ParseLineTable(SourceMgr, Record))
+ return Failure;
+ break;
+
+ case pch::SM_HEADER_FILE_INFO: {
+ HeaderFileInfo HFI;
+ HFI.isImport = Record[0];
+ HFI.DirInfo = Record[1];
+ HFI.NumIncludes = Record[2];
+ HFI.ControllingMacroID = Record[3];
+ PP.getHeaderSearchInfo().setHeaderFileInfoForUID(HFI, NumHeaderInfos++);
+ break;
+ }
+
+ case pch::SM_SLOC_FILE_ENTRY:
+ case pch::SM_SLOC_BUFFER_ENTRY:
+ case pch::SM_SLOC_INSTANTIATION_ENTRY:
+ // Once we hit one of the source location entries, we're done.
+ return Success;
+ }
+ }
+}
+
+/// \brief Read in the source location entry with the given ID.
+PCHReader::PCHReadResult PCHReader::ReadSLocEntryRecord(unsigned ID) {
+ if (ID == 0)
+ return Success;
+
+ if (ID > TotalNumSLocEntries) {
+ Error("source location entry ID out-of-range for PCH file");
+ return Failure;
+ }
+
+ ++NumSLocEntriesRead;
+ SLocEntryCursor.JumpToBit(SLocOffsets[ID - 1]);
+ unsigned Code = SLocEntryCursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK ||
+ Code == llvm::bitc::ENTER_SUBBLOCK ||
+ Code == llvm::bitc::DEFINE_ABBREV) {
+ Error("incorrectly-formatted source location entry in PCH file");
+ return Failure;
+ }
+
+ SourceManager &SourceMgr = PP.getSourceManager();
+ RecordData Record;
+ const char *BlobStart;
+ unsigned BlobLen;
+ switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default:
+ Error("incorrectly-formatted source location entry in PCH file");
+ return Failure;
+
+ case pch::SM_SLOC_FILE_ENTRY: {
+ const FileEntry *File
+ = PP.getFileManager().getFile(BlobStart, BlobStart + BlobLen);
+ // FIXME: Error recovery if file cannot be found.
+ FileID FID = SourceMgr.createFileID(File,
+ SourceLocation::getFromRawEncoding(Record[1]),
+ (SrcMgr::CharacteristicKind)Record[2],
+ ID, Record[0]);
+ if (Record[3])
+ const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile())
+ .setHasLineDirectives();
+
+ break;
+ }
+
+ case pch::SM_SLOC_BUFFER_ENTRY: {
+ const char *Name = BlobStart;
+ unsigned Offset = Record[0];
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode
+ = SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen);
+ assert(RecCode == pch::SM_SLOC_BUFFER_BLOB && "Ill-formed PCH file");
+ (void)RecCode;
+ llvm::MemoryBuffer *Buffer
+ = llvm::MemoryBuffer::getMemBuffer(BlobStart,
+ BlobStart + BlobLen - 1,
+ Name);
+ FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID, Offset);
+
+ if (strcmp(Name, "<built-in>") == 0) {
+ PCHPredefinesBufferID = BufferID;
+ PCHPredefines = BlobStart;
+ PCHPredefinesLen = BlobLen - 1;
+ }
+
+ break;
+ }
+
+ case pch::SM_SLOC_INSTANTIATION_ENTRY: {
+ SourceLocation SpellingLoc
+ = SourceLocation::getFromRawEncoding(Record[1]);
+ SourceMgr.createInstantiationLoc(SpellingLoc,
+ SourceLocation::getFromRawEncoding(Record[2]),
+ SourceLocation::getFromRawEncoding(Record[3]),
+ Record[4],
+ ID,
+ Record[0]);
+ break;
+ }
+ }
+
+ return Success;
+}
+
+/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
+/// specified cursor. Read the abbreviations that are at the top of the block
+/// and then leave the cursor pointing into the block.
+bool PCHReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
+ unsigned BlockID) {
+ if (Cursor.EnterSubBlock(BlockID)) {
+ Error("malformed block record in PCH file");
+ return true;
+ }
+
+ while (true) {
+ unsigned Code = Cursor.ReadCode();
+
+ // We expect all abbrevs to be at the start of the block.
+ if (Code != llvm::bitc::DEFINE_ABBREV)
+ return false;
+ Cursor.ReadAbbrevRecord();
+ }
+}
+
+void PCHReader::ReadMacroRecord(uint64_t Offset) {
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this macro.
+ SavedStreamPosition SavedPosition(Stream);
+
+ Stream.JumpToBit(Offset);
+ RecordData Record;
+ llvm::SmallVector<IdentifierInfo*, 16> MacroArgs;
+ MacroInfo *Macro = 0;
+
+ while (true) {
+ unsigned Code = Stream.ReadCode();
+ switch (Code) {
+ case llvm::bitc::END_BLOCK:
+ return;
+
+ case llvm::bitc::ENTER_SUBBLOCK:
+ // No known subblocks, always skip them.
+ Stream.ReadSubBlockID();
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return;
+ }
+ continue;
+
+ case llvm::bitc::DEFINE_ABBREV:
+ Stream.ReadAbbrevRecord();
+ continue;
+ default: break;
+ }
+
+ // Read a record.
+ Record.clear();
+ pch::PreprocessorRecordTypes RecType =
+ (pch::PreprocessorRecordTypes)Stream.ReadRecord(Code, Record);
+ switch (RecType) {
+ case pch::PP_MACRO_OBJECT_LIKE:
+ case pch::PP_MACRO_FUNCTION_LIKE: {
+ // If we already have a macro, that means that we've hit the end
+ // of the definition of the macro we were looking for. We're
+ // done.
+ if (Macro)
+ return;
+
+ IdentifierInfo *II = DecodeIdentifierInfo(Record[0]);
+ if (II == 0) {
+ Error("macro must have a name in PCH file");
+ return;
+ }
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(Record[1]);
+ bool isUsed = Record[2];
+
+ MacroInfo *MI = PP.AllocateMacroInfo(Loc);
+ MI->setIsUsed(isUsed);
+
+ if (RecType == pch::PP_MACRO_FUNCTION_LIKE) {
+ // Decode function-like macro info.
+ bool isC99VarArgs = Record[3];
+ bool isGNUVarArgs = Record[4];
+ MacroArgs.clear();
+ unsigned NumArgs = Record[5];
+ for (unsigned i = 0; i != NumArgs; ++i)
+ MacroArgs.push_back(DecodeIdentifierInfo(Record[6+i]));
+
+ // Install function-like macro info.
+ MI->setIsFunctionLike();
+ if (isC99VarArgs) MI->setIsC99Varargs();
+ if (isGNUVarArgs) MI->setIsGNUVarargs();
+ MI->setArgumentList(MacroArgs.data(), MacroArgs.size(),
+ PP.getPreprocessorAllocator());
+ }
+
+ // Finally, install the macro.
+ PP.setMacroInfo(II, MI);
+
+ // Remember that we saw this macro last so that we add the tokens that
+ // form its body to it.
+ Macro = MI;
+ ++NumMacrosRead;
+ break;
+ }
+
+ case pch::PP_TOKEN: {
+ // If we see a TOKEN before a PP_MACRO_*, then the file is
+ // erroneous, just pretend we didn't see this.
+ if (Macro == 0) break;
+
+ Token Tok;
+ Tok.startToken();
+ Tok.setLocation(SourceLocation::getFromRawEncoding(Record[0]));
+ Tok.setLength(Record[1]);
+ if (IdentifierInfo *II = DecodeIdentifierInfo(Record[2]))
+ Tok.setIdentifierInfo(II);
+ Tok.setKind((tok::TokenKind)Record[3]);
+ Tok.setFlag((Token::TokenFlags)Record[4]);
+ Macro->AddTokenToBody(Tok);
+ break;
+ }
+ }
+ }
+}
+
+PCHReader::PCHReadResult
+PCHReader::ReadPCHBlock() {
+ if (Stream.EnterSubBlock(pch::PCH_BLOCK_ID)) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+
+ // Read all of the records and blocks for the PCH file.
+ RecordData Record;
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ Error("error at end of module block in PCH file");
+ return Failure;
+ }
+
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ switch (Stream.ReadSubBlockID()) {
+ case pch::TYPES_BLOCK_ID: // Skip types block (lazily loaded)
+ default: // Skip unknown content.
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+ break;
+
+ case pch::DECLS_BLOCK_ID:
+ // We lazily load the decls block, but we want to set up the
+ // DeclsCursor cursor to point into it. Clone our current bitcode
+ // cursor to it, enter the block and read the abbrevs in that block.
+ // With the main cursor, we just skip over it.
+ DeclsCursor = Stream;
+ if (Stream.SkipBlock() || // Skip with the main cursor.
+ // Read the abbrevs.
+ ReadBlockAbbrevs(DeclsCursor, pch::DECLS_BLOCK_ID)) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+ break;
+
+ case pch::PREPROCESSOR_BLOCK_ID:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+ break;
+
+ case pch::SOURCE_MANAGER_BLOCK_ID:
+ switch (ReadSourceManagerBlock()) {
+ case Success:
+ break;
+
+ case Failure:
+ Error("malformed source manager block in PCH file");
+ return Failure;
+
+ case IgnorePCH:
+ return IgnorePCH;
+ }
+ break;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ switch ((pch::PCHRecordTypes)Stream.ReadRecord(Code, Record,
+ &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case pch::TYPE_OFFSET:
+ if (!TypesLoaded.empty()) {
+ Error("duplicate TYPE_OFFSET record in PCH file");
+ return Failure;
+ }
+ TypeOffsets = (const uint32_t *)BlobStart;
+ TypesLoaded.resize(Record[0]);
+ break;
+
+ case pch::DECL_OFFSET:
+ if (!DeclsLoaded.empty()) {
+ Error("duplicate DECL_OFFSET record in PCH file");
+ return Failure;
+ }
+ DeclOffsets = (const uint32_t *)BlobStart;
+ DeclsLoaded.resize(Record[0]);
+ break;
+
+ case pch::LANGUAGE_OPTIONS:
+ if (ParseLanguageOptions(Record))
+ return IgnorePCH;
+ break;
+
+ case pch::METADATA: {
+ if (Record[0] != pch::VERSION_MAJOR) {
+ Diag(Record[0] < pch::VERSION_MAJOR ? diag::warn_pch_version_too_old
+ : diag::warn_pch_version_too_new);
+ return IgnorePCH;
+ }
+
+ std::string TargetTriple(BlobStart, BlobLen);
+ if (TargetTriple != PP.getTargetInfo().getTargetTriple()) {
+ Diag(diag::warn_pch_target_triple)
+ << TargetTriple << PP.getTargetInfo().getTargetTriple();
+ return IgnorePCH;
+ }
+ break;
+ }
+
+ case pch::IDENTIFIER_TABLE:
+ IdentifierTableData = BlobStart;
+ if (Record[0]) {
+ IdentifierLookupTable
+ = PCHIdentifierLookupTable::Create(
+ (const unsigned char *)IdentifierTableData + Record[0],
+ (const unsigned char *)IdentifierTableData,
+ PCHIdentifierLookupTrait(*this));
+ PP.getIdentifierTable().setExternalIdentifierLookup(this);
+ }
+ break;
+
+ case pch::IDENTIFIER_OFFSET:
+ if (!IdentifiersLoaded.empty()) {
+ Error("duplicate IDENTIFIER_OFFSET record in PCH file");
+ return Failure;
+ }
+ IdentifierOffsets = (const uint32_t *)BlobStart;
+ IdentifiersLoaded.resize(Record[0]);
+ PP.getHeaderSearchInfo().SetExternalLookup(this);
+ break;
+
+ case pch::EXTERNAL_DEFINITIONS:
+ if (!ExternalDefinitions.empty()) {
+ Error("duplicate EXTERNAL_DEFINITIONS record in PCH file");
+ return Failure;
+ }
+ ExternalDefinitions.swap(Record);
+ break;
+
+ case pch::SPECIAL_TYPES:
+ SpecialTypes.swap(Record);
+ break;
+
+ case pch::STATISTICS:
+ TotalNumStatements = Record[0];
+ TotalNumMacros = Record[1];
+ TotalLexicalDeclContexts = Record[2];
+ TotalVisibleDeclContexts = Record[3];
+ break;
+
+ case pch::TENTATIVE_DEFINITIONS:
+ if (!TentativeDefinitions.empty()) {
+ Error("duplicate TENTATIVE_DEFINITIONS record in PCH file");
+ return Failure;
+ }
+ TentativeDefinitions.swap(Record);
+ break;
+
+ case pch::LOCALLY_SCOPED_EXTERNAL_DECLS:
+ if (!LocallyScopedExternalDecls.empty()) {
+ Error("duplicate LOCALLY_SCOPED_EXTERNAL_DECLS record in PCH file");
+ return Failure;
+ }
+ LocallyScopedExternalDecls.swap(Record);
+ break;
+
+ case pch::SELECTOR_OFFSETS:
+ SelectorOffsets = (const uint32_t *)BlobStart;
+ TotalNumSelectors = Record[0];
+ SelectorsLoaded.resize(TotalNumSelectors);
+ break;
+
+ case pch::METHOD_POOL:
+ MethodPoolLookupTableData = (const unsigned char *)BlobStart;
+ if (Record[0])
+ MethodPoolLookupTable
+ = PCHMethodPoolLookupTable::Create(
+ MethodPoolLookupTableData + Record[0],
+ MethodPoolLookupTableData,
+ PCHMethodPoolLookupTrait(*this));
+ TotalSelectorsInMethodPool = Record[1];
+ break;
+
+ case pch::PP_COUNTER_VALUE:
+ if (!Record.empty())
+ PP.setCounterValue(Record[0]);
+ break;
+
+ case pch::SOURCE_LOCATION_OFFSETS:
+ SLocOffsets = (const uint32_t *)BlobStart;
+ TotalNumSLocEntries = Record[0];
+ PP.getSourceManager().PreallocateSLocEntries(this,
+ TotalNumSLocEntries,
+ Record[1]);
+ break;
+
+ case pch::SOURCE_LOCATION_PRELOADS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ PCHReadResult Result = ReadSLocEntryRecord(Record[I]);
+ if (Result != Success)
+ return Result;
+ }
+ break;
+
+ case pch::STAT_CACHE:
+ PP.getFileManager().setStatCache(
+ new PCHStatCache((const unsigned char *)BlobStart + Record[0],
+ (const unsigned char *)BlobStart,
+ NumStatHits, NumStatMisses));
+ break;
+
+ case pch::EXT_VECTOR_DECLS:
+ if (!ExtVectorDecls.empty()) {
+ Error("duplicate EXT_VECTOR_DECLS record in PCH file");
+ return Failure;
+ }
+ ExtVectorDecls.swap(Record);
+ break;
+
+ case pch::OBJC_CATEGORY_IMPLEMENTATIONS:
+ if (!ObjCCategoryImpls.empty()) {
+ Error("duplicate OBJC_CATEGORY_IMPLEMENTATIONS record in PCH file");
+ return Failure;
+ }
+ ObjCCategoryImpls.swap(Record);
+ break;
+
+ case pch::ORIGINAL_FILE_NAME:
+ OriginalFileName.assign(BlobStart, BlobLen);
+ break;
+ }
+ }
+ Error("premature end of bitstream in PCH file");
+ return Failure;
+}
+
+PCHReader::PCHReadResult PCHReader::ReadPCH(const std::string &FileName) {
+ // Set the PCH file name.
+ this->FileName = FileName;
+
+ // Open the PCH file.
+ std::string ErrStr;
+ Buffer.reset(llvm::MemoryBuffer::getFile(FileName.c_str(), &ErrStr));
+ if (!Buffer) {
+ Error(ErrStr.c_str());
+ return IgnorePCH;
+ }
+
+ // Initialize the stream
+ StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
+ (const unsigned char *)Buffer->getBufferEnd());
+ Stream.init(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ Diag(diag::err_not_a_pch_file) << FileName;
+ return Failure;
+ }
+
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code != llvm::bitc::ENTER_SUBBLOCK) {
+ Error("invalid record at top-level of PCH file");
+ return Failure;
+ }
+
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the PCH subblock ID.
+ switch (BlockID) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
+ if (Stream.ReadBlockInfoBlock()) {
+ Error("malformed BlockInfoBlock in PCH file");
+ return Failure;
+ }
+ break;
+ case pch::PCH_BLOCK_ID:
+ switch (ReadPCHBlock()) {
+ case Success:
+ break;
+
+ case Failure:
+ return Failure;
+
+ case IgnorePCH:
+ // FIXME: We could consider reading through to the end of this
+ // PCH block, skipping subblocks, to see if there are other
+ // PCH blocks elsewhere.
+
+ // Clear out any preallocated source location entries, so that
+ // the source manager does not try to resolve them later.
+ PP.getSourceManager().ClearPreallocatedSLocEntries();
+
+ // Remove the stat cache.
+ PP.getFileManager().setStatCache(0);
+
+ return IgnorePCH;
+ }
+ break;
+ default:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return Failure;
+ }
+ break;
+ }
+ }
+
+ // Load the translation unit declaration
+ if (Context)
+ ReadDeclRecord(DeclOffsets[0], 0);
+
+ // Check the predefines buffer.
+ if (CheckPredefinesBuffer(PCHPredefines, PCHPredefinesLen,
+ PCHPredefinesBufferID))
+ return IgnorePCH;
+
+ // Initialization of builtins and library builtins occurs before the
+ // PCH file is read, so there may be some identifiers that were
+ // loaded into the IdentifierTable before we intercepted the
+ // creation of identifiers. Iterate through the list of known
+ // identifiers and determine whether we have to establish
+ // preprocessor definitions or top-level identifier declaration
+ // chains for those identifiers.
+ //
+ // We copy the IdentifierInfo pointers to a small vector first,
+ // since de-serializing declarations or macro definitions can add
+ // new entries into the identifier table, invalidating the
+ // iterators.
+ llvm::SmallVector<IdentifierInfo *, 128> Identifiers;
+ for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
+ IdEnd = PP.getIdentifierTable().end();
+ Id != IdEnd; ++Id)
+ Identifiers.push_back(Id->second);
+ PCHIdentifierLookupTable *IdTable
+ = (PCHIdentifierLookupTable *)IdentifierLookupTable;
+ for (unsigned I = 0, N = Identifiers.size(); I != N; ++I) {
+ IdentifierInfo *II = Identifiers[I];
+ // Look in the on-disk hash table for an entry for this identifier.
+ PCHIdentifierLookupTrait Info(*this, II);
+ std::pair<const char*, unsigned> Key(II->getName(), II->getLength());
+ PCHIdentifierLookupTable::iterator Pos = IdTable->find(Key, &Info);
+ if (Pos == IdTable->end())
+ continue;
+
+ // Dereferencing the iterator has the effect of populating the
+ // IdentifierInfo node with the various declarations it needs.
+ (void)*Pos;
+ }
+
+ // Load the special types.
+ if (Context) {
+ Context->setBuiltinVaListType(
+ GetType(SpecialTypes[pch::SPECIAL_TYPE_BUILTIN_VA_LIST]));
+ if (unsigned Id = SpecialTypes[pch::SPECIAL_TYPE_OBJC_ID])
+ Context->setObjCIdType(GetType(Id));
+ if (unsigned Sel = SpecialTypes[pch::SPECIAL_TYPE_OBJC_SELECTOR])
+ Context->setObjCSelType(GetType(Sel));
+ if (unsigned Proto = SpecialTypes[pch::SPECIAL_TYPE_OBJC_PROTOCOL])
+ Context->setObjCProtoType(GetType(Proto));
+ if (unsigned Class = SpecialTypes[pch::SPECIAL_TYPE_OBJC_CLASS])
+ Context->setObjCClassType(GetType(Class));
+ if (unsigned String = SpecialTypes[pch::SPECIAL_TYPE_CF_CONSTANT_STRING])
+ Context->setCFConstantStringType(GetType(String));
+ if (unsigned FastEnum
+ = SpecialTypes[pch::SPECIAL_TYPE_OBJC_FAST_ENUMERATION_STATE])
+ Context->setObjCFastEnumerationStateType(GetType(FastEnum));
+ }
+
+ return Success;
+}
+
+/// \brief Retrieve the name of the original source file directly from the
+/// PCH file, without actually loading the PCH file.
+std::string PCHReader::getOriginalSourceFile(const std::string &PCHFileName) {
+ // Open the PCH file.
+ std::string ErrStr;
+ llvm::OwningPtr<llvm::MemoryBuffer> Buffer;
+ Buffer.reset(llvm::MemoryBuffer::getFile(PCHFileName.c_str(), &ErrStr));
+ if (!Buffer) {
+ fprintf(stderr, "error: %s\n", ErrStr.c_str());
+ return std::string();
+ }
+
+ // Initialize the stream
+ llvm::BitstreamReader StreamFile;
+ llvm::BitstreamCursor Stream;
+ StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
+ (const unsigned char *)Buffer->getBufferEnd());
+ Stream.init(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ fprintf(stderr,
+ "error: '%s' does not appear to be a precompiled header file\n",
+ PCHFileName.c_str());
+ return std::string();
+ }
+
+ RecordData Record;
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the PCH subblock ID.
+ switch (BlockID) {
+ case pch::PCH_BLOCK_ID:
+ if (Stream.EnterSubBlock(pch::PCH_BLOCK_ID)) {
+ fprintf(stderr, "error: malformed block record in PCH file\n");
+ return std::string();
+ }
+ break;
+
+ default:
+ if (Stream.SkipBlock()) {
+ fprintf(stderr, "error: malformed block record in PCH file\n");
+ return std::string();
+ }
+ break;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ fprintf(stderr, "error: error at end of module block in PCH file\n");
+ return std::string();
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)
+ == pch::ORIGINAL_FILE_NAME)
+ return std::string(BlobStart, BlobLen);
+ }
+
+ return std::string();
+}
+
+/// \brief Parse the record that corresponds to a LangOptions data
+/// structure.
+///
+/// This routine compares the language options used to generate the
+/// PCH file against the language options set for the current
+/// compilation. For each option, we classify differences between the
+/// two compiler states as either "benign" or "important". Benign
+/// differences don't matter, and we accept them without complaint
+/// (and without modifying the language options). Differences between
+/// the states for important options cause the PCH file to be
+/// unusable, so we emit a warning and return true to indicate that
+/// there was an error.
+///
+/// \returns true if the PCH file is unacceptable, false otherwise.
+bool PCHReader::ParseLanguageOptions(
+ const llvm::SmallVectorImpl<uint64_t> &Record) {
+ const LangOptions &LangOpts = PP.getLangOptions();
+#define PARSE_LANGOPT_BENIGN(Option) ++Idx
+#define PARSE_LANGOPT_IMPORTANT(Option, DiagID) \
+ if (Record[Idx] != LangOpts.Option) { \
+ Diag(DiagID) << (unsigned)Record[Idx] << LangOpts.Option; \
+ return true; \
+ } \
+ ++Idx
+
+ unsigned Idx = 0;
+ PARSE_LANGOPT_BENIGN(Trigraphs);
+ PARSE_LANGOPT_BENIGN(BCPLComment);
+ PARSE_LANGOPT_BENIGN(DollarIdents);
+ PARSE_LANGOPT_BENIGN(AsmPreprocessor);
+ PARSE_LANGOPT_IMPORTANT(GNUMode, diag::warn_pch_gnu_extensions);
+ PARSE_LANGOPT_BENIGN(ImplicitInt);
+ PARSE_LANGOPT_BENIGN(Digraphs);
+ PARSE_LANGOPT_BENIGN(HexFloats);
+ PARSE_LANGOPT_IMPORTANT(C99, diag::warn_pch_c99);
+ PARSE_LANGOPT_IMPORTANT(Microsoft, diag::warn_pch_microsoft_extensions);
+ PARSE_LANGOPT_IMPORTANT(CPlusPlus, diag::warn_pch_cplusplus);
+ PARSE_LANGOPT_IMPORTANT(CPlusPlus0x, diag::warn_pch_cplusplus0x);
+ PARSE_LANGOPT_BENIGN(CXXOperatorName);
+ PARSE_LANGOPT_IMPORTANT(ObjC1, diag::warn_pch_objective_c);
+ PARSE_LANGOPT_IMPORTANT(ObjC2, diag::warn_pch_objective_c2);
+ PARSE_LANGOPT_IMPORTANT(ObjCNonFragileABI, diag::warn_pch_nonfragile_abi);
+ PARSE_LANGOPT_BENIGN(PascalStrings);
+ PARSE_LANGOPT_BENIGN(WritableStrings);
+ PARSE_LANGOPT_IMPORTANT(LaxVectorConversions,
+ diag::warn_pch_lax_vector_conversions);
+ PARSE_LANGOPT_IMPORTANT(Exceptions, diag::warn_pch_exceptions);
+ PARSE_LANGOPT_IMPORTANT(NeXTRuntime, diag::warn_pch_objc_runtime);
+ PARSE_LANGOPT_IMPORTANT(Freestanding, diag::warn_pch_freestanding);
+ PARSE_LANGOPT_IMPORTANT(NoBuiltin, diag::warn_pch_builtins);
+ PARSE_LANGOPT_IMPORTANT(ThreadsafeStatics,
+ diag::warn_pch_thread_safe_statics);
+ PARSE_LANGOPT_IMPORTANT(Blocks, diag::warn_pch_blocks);
+ PARSE_LANGOPT_BENIGN(EmitAllDecls);
+ PARSE_LANGOPT_IMPORTANT(MathErrno, diag::warn_pch_math_errno);
+ PARSE_LANGOPT_IMPORTANT(OverflowChecking, diag::warn_pch_overflow_checking);
+ PARSE_LANGOPT_IMPORTANT(HeinousExtensions,
+ diag::warn_pch_heinous_extensions);
+ // FIXME: Most of the options below are benign if the macro wasn't
+ // used. Unfortunately, this means that a PCH compiled without
+ // optimization can't be used with optimization turned on, even
+ // though the only thing that changes is whether __OPTIMIZE__ was
+ // defined... but if __OPTIMIZE__ never showed up in the header, it
+ // doesn't matter. We could consider making this some special kind
+ // of check.
+ PARSE_LANGOPT_IMPORTANT(Optimize, diag::warn_pch_optimize);
+ PARSE_LANGOPT_IMPORTANT(OptimizeSize, diag::warn_pch_optimize_size);
+ PARSE_LANGOPT_IMPORTANT(Static, diag::warn_pch_static);
+ PARSE_LANGOPT_IMPORTANT(PICLevel, diag::warn_pch_pic_level);
+ PARSE_LANGOPT_IMPORTANT(GNUInline, diag::warn_pch_gnu_inline);
+ PARSE_LANGOPT_IMPORTANT(NoInline, diag::warn_pch_no_inline);
+ PARSE_LANGOPT_IMPORTANT(AccessControl, diag::warn_pch_access_control);
+ if ((LangOpts.getGCMode() != 0) != (Record[Idx] != 0)) {
+ Diag(diag::warn_pch_gc_mode)
+ << (unsigned)Record[Idx] << LangOpts.getGCMode();
+ return true;
+ }
+ ++Idx;
+ PARSE_LANGOPT_BENIGN(getVisibilityMode());
+ PARSE_LANGOPT_BENIGN(InstantiationDepth);
+#undef PARSE_LANGOPT_IMPORTANT
+#undef PARSE_LANGOPT_BENIGN
+
+ return false;
+}
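+
+// For illustration, PARSE_LANGOPT_IMPORTANT(C99, diag::warn_pch_c99) above
+// expands (together with the call site's trailing ';') to:
+//
+//   if (Record[Idx] != LangOpts.C99) {
+//     Diag(diag::warn_pch_c99) << (unsigned)Record[Idx] << LangOpts.C99;
+//     return true;
+//   }
+//   ++Idx;
+//
+// so every option, benign or important, consumes exactly one record slot.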
+
+/// \brief Read and return the type at the given offset.
+///
+/// This routine actually reads the record corresponding to the type
+/// at the given offset in the bitstream. It is a helper routine for
+/// GetType, which deals with reading type IDs.
+QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this type.
+ SavedStreamPosition SavedPosition(Stream);
+
+ Stream.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = Stream.ReadCode();
+ switch ((pch::TypeCode)Stream.ReadRecord(Code, Record)) {
+ case pch::TYPE_EXT_QUAL: {
+ assert(Record.size() == 3 &&
+ "Incorrect encoding of extended qualifier type");
+ QualType Base = GetType(Record[0]);
+ QualType::GCAttrTypes GCAttr = (QualType::GCAttrTypes)Record[1];
+ unsigned AddressSpace = Record[2];
+
+ QualType T = Base;
+ if (GCAttr != QualType::GCNone)
+ T = Context->getObjCGCQualType(T, GCAttr);
+ if (AddressSpace)
+ T = Context->getAddrSpaceQualType(T, AddressSpace);
+ return T;
+ }
+
+ case pch::TYPE_FIXED_WIDTH_INT: {
+ assert(Record.size() == 2 && "Incorrect encoding of fixed-width int type");
+ return Context->getFixedWidthIntType(Record[0], Record[1]);
+ }
+
+ case pch::TYPE_COMPLEX: {
+ assert(Record.size() == 1 && "Incorrect encoding of complex type");
+ QualType ElemType = GetType(Record[0]);
+ return Context->getComplexType(ElemType);
+ }
+
+ case pch::TYPE_POINTER: {
+ assert(Record.size() == 1 && "Incorrect encoding of pointer type");
+ QualType PointeeType = GetType(Record[0]);
+ return Context->getPointerType(PointeeType);
+ }
+
+ case pch::TYPE_BLOCK_POINTER: {
+ assert(Record.size() == 1 && "Incorrect encoding of block pointer type");
+ QualType PointeeType = GetType(Record[0]);
+ return Context->getBlockPointerType(PointeeType);
+ }
+
+ case pch::TYPE_LVALUE_REFERENCE: {
+ assert(Record.size() == 1 && "Incorrect encoding of lvalue reference type");
+ QualType PointeeType = GetType(Record[0]);
+ return Context->getLValueReferenceType(PointeeType);
+ }
+
+ case pch::TYPE_RVALUE_REFERENCE: {
+ assert(Record.size() == 1 && "Incorrect encoding of rvalue reference type");
+ QualType PointeeType = GetType(Record[0]);
+ return Context->getRValueReferenceType(PointeeType);
+ }
+
+ case pch::TYPE_MEMBER_POINTER: {
+    assert(Record.size() == 2 && "Incorrect encoding of member pointer type");
+ QualType PointeeType = GetType(Record[0]);
+ QualType ClassType = GetType(Record[1]);
+ return Context->getMemberPointerType(PointeeType, ClassType.getTypePtr());
+ }
+
+ case pch::TYPE_CONSTANT_ARRAY: {
+ QualType ElementType = GetType(Record[0]);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ unsigned Idx = 3;
+ llvm::APInt Size = ReadAPInt(Record, Idx);
+    return Context->getConstantArrayType(ElementType, Size, ASM,
+                                          IndexTypeQuals);
+ }
+
+ case pch::TYPE_INCOMPLETE_ARRAY: {
+ QualType ElementType = GetType(Record[0]);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ return Context->getIncompleteArrayType(ElementType, ASM, IndexTypeQuals);
+ }
+
+ case pch::TYPE_VARIABLE_ARRAY: {
+ QualType ElementType = GetType(Record[0]);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ return Context->getVariableArrayType(ElementType, ReadTypeExpr(),
+ ASM, IndexTypeQuals);
+ }
+
+ case pch::TYPE_VECTOR: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of vector type in PCH file");
+ return QualType();
+ }
+
+ QualType ElementType = GetType(Record[0]);
+ unsigned NumElements = Record[1];
+ return Context->getVectorType(ElementType, NumElements);
+ }
+
+ case pch::TYPE_EXT_VECTOR: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of extended vector type in PCH file");
+ return QualType();
+ }
+
+ QualType ElementType = GetType(Record[0]);
+ unsigned NumElements = Record[1];
+ return Context->getExtVectorType(ElementType, NumElements);
+ }
+
+ case pch::TYPE_FUNCTION_NO_PROTO: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of no-proto function type");
+ return QualType();
+ }
+ QualType ResultType = GetType(Record[0]);
+ return Context->getFunctionNoProtoType(ResultType);
+ }
+
+ case pch::TYPE_FUNCTION_PROTO: {
+ QualType ResultType = GetType(Record[0]);
+ unsigned Idx = 1;
+ unsigned NumParams = Record[Idx++];
+ llvm::SmallVector<QualType, 16> ParamTypes;
+ for (unsigned I = 0; I != NumParams; ++I)
+ ParamTypes.push_back(GetType(Record[Idx++]));
+ bool isVariadic = Record[Idx++];
+ unsigned Quals = Record[Idx++];
+ bool hasExceptionSpec = Record[Idx++];
+ bool hasAnyExceptionSpec = Record[Idx++];
+ unsigned NumExceptions = Record[Idx++];
+ llvm::SmallVector<QualType, 2> Exceptions;
+ for (unsigned I = 0; I != NumExceptions; ++I)
+ Exceptions.push_back(GetType(Record[Idx++]));
+ return Context->getFunctionType(ResultType, ParamTypes.data(), NumParams,
+ isVariadic, Quals, hasExceptionSpec,
+ hasAnyExceptionSpec, NumExceptions,
+ Exceptions.data());
+ }
+
+ case pch::TYPE_TYPEDEF:
+ assert(Record.size() == 1 && "incorrect encoding of typedef type");
+ return Context->getTypeDeclType(cast<TypedefDecl>(GetDecl(Record[0])));
+
+ case pch::TYPE_TYPEOF_EXPR:
+ return Context->getTypeOfExprType(ReadTypeExpr());
+
+ case pch::TYPE_TYPEOF: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of typeof(type) in PCH file");
+ return QualType();
+ }
+ QualType UnderlyingType = GetType(Record[0]);
+ return Context->getTypeOfType(UnderlyingType);
+ }
+
+ case pch::TYPE_RECORD:
+ assert(Record.size() == 1 && "incorrect encoding of record type");
+ return Context->getTypeDeclType(cast<RecordDecl>(GetDecl(Record[0])));
+
+ case pch::TYPE_ENUM:
+ assert(Record.size() == 1 && "incorrect encoding of enum type");
+ return Context->getTypeDeclType(cast<EnumDecl>(GetDecl(Record[0])));
+
+ case pch::TYPE_OBJC_INTERFACE:
+ assert(Record.size() == 1 && "incorrect encoding of objc interface type");
+ return Context->getObjCInterfaceType(
+ cast<ObjCInterfaceDecl>(GetDecl(Record[0])));
+
+ case pch::TYPE_OBJC_QUALIFIED_INTERFACE: {
+ unsigned Idx = 0;
+ ObjCInterfaceDecl *ItfD = cast<ObjCInterfaceDecl>(GetDecl(Record[Idx++]));
+ unsigned NumProtos = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl*, 4> Protos;
+ for (unsigned I = 0; I != NumProtos; ++I)
+ Protos.push_back(cast<ObjCProtocolDecl>(GetDecl(Record[Idx++])));
+ return Context->getObjCQualifiedInterfaceType(ItfD, Protos.data(), NumProtos);
+ }
+
+ case pch::TYPE_OBJC_QUALIFIED_ID: {
+ unsigned Idx = 0;
+ unsigned NumProtos = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl*, 4> Protos;
+ for (unsigned I = 0; I != NumProtos; ++I)
+ Protos.push_back(cast<ObjCProtocolDecl>(GetDecl(Record[Idx++])));
+ return Context->getObjCQualifiedIdType(Protos.data(), NumProtos);
+ }
+ }
+ // Suppress a GCC warning
+ return QualType();
+}
+
+
+QualType PCHReader::GetType(pch::TypeID ID) {
+ unsigned Quals = ID & 0x07;
+ unsigned Index = ID >> 3;
+
+ if (Index < pch::NUM_PREDEF_TYPE_IDS) {
+ QualType T;
+ switch ((pch::PredefinedTypeIDs)Index) {
+ case pch::PREDEF_TYPE_NULL_ID: return QualType();
+ case pch::PREDEF_TYPE_VOID_ID: T = Context->VoidTy; break;
+ case pch::PREDEF_TYPE_BOOL_ID: T = Context->BoolTy; break;
+
+ case pch::PREDEF_TYPE_CHAR_U_ID:
+ case pch::PREDEF_TYPE_CHAR_S_ID:
+ // FIXME: Check that the signedness of CharTy is correct!
+ T = Context->CharTy;
+ break;
+
+ case pch::PREDEF_TYPE_UCHAR_ID: T = Context->UnsignedCharTy; break;
+ case pch::PREDEF_TYPE_USHORT_ID: T = Context->UnsignedShortTy; break;
+ case pch::PREDEF_TYPE_UINT_ID: T = Context->UnsignedIntTy; break;
+ case pch::PREDEF_TYPE_ULONG_ID: T = Context->UnsignedLongTy; break;
+ case pch::PREDEF_TYPE_ULONGLONG_ID: T = Context->UnsignedLongLongTy; break;
+ case pch::PREDEF_TYPE_UINT128_ID: T = Context->UnsignedInt128Ty; break;
+ case pch::PREDEF_TYPE_SCHAR_ID: T = Context->SignedCharTy; break;
+ case pch::PREDEF_TYPE_WCHAR_ID: T = Context->WCharTy; break;
+ case pch::PREDEF_TYPE_SHORT_ID: T = Context->ShortTy; break;
+ case pch::PREDEF_TYPE_INT_ID: T = Context->IntTy; break;
+ case pch::PREDEF_TYPE_LONG_ID: T = Context->LongTy; break;
+ case pch::PREDEF_TYPE_LONGLONG_ID: T = Context->LongLongTy; break;
+ case pch::PREDEF_TYPE_INT128_ID: T = Context->Int128Ty; break;
+ case pch::PREDEF_TYPE_FLOAT_ID: T = Context->FloatTy; break;
+ case pch::PREDEF_TYPE_DOUBLE_ID: T = Context->DoubleTy; break;
+ case pch::PREDEF_TYPE_LONGDOUBLE_ID: T = Context->LongDoubleTy; break;
+ case pch::PREDEF_TYPE_OVERLOAD_ID: T = Context->OverloadTy; break;
+ case pch::PREDEF_TYPE_DEPENDENT_ID: T = Context->DependentTy; break;
+ case pch::PREDEF_TYPE_NULLPTR_ID: T = Context->NullPtrTy; break;
+ }
+
+ assert(!T.isNull() && "Unknown predefined type");
+ return T.getQualifiedType(Quals);
+ }
+
+ Index -= pch::NUM_PREDEF_TYPE_IDS;
+ assert(Index < TypesLoaded.size() && "Type index out-of-range");
+ if (!TypesLoaded[Index])
+ TypesLoaded[Index] = ReadTypeRecord(TypeOffsets[Index]).getTypePtr();
+
+ return QualType(TypesLoaded[Index], Quals);
+}
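+
+// For illustration, decoding a hypothetical type ID of 42 (binary 101010):
+//
+//   unsigned Quals = 42 & 0x07;  // == 2, the qualifier bits
+//   unsigned Index = 42 >> 3;    // == 5
+//
+// Index 5 then names either a predefined type or, after subtracting
+// NUM_PREDEF_TYPE_IDS, an entry in the TypesLoaded/TypeOffsets tables.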
+
+Decl *PCHReader::GetDecl(pch::DeclID ID) {
+ if (ID == 0)
+ return 0;
+
+ if (ID > DeclsLoaded.size()) {
+ Error("declaration ID out-of-range for PCH file");
+ return 0;
+ }
+
+ unsigned Index = ID - 1;
+ if (!DeclsLoaded[Index])
+ ReadDeclRecord(DeclOffsets[Index], Index);
+
+ return DeclsLoaded[Index];
+}
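+
+// For illustration: declaration ID 0 is reserved for "no declaration", so a
+// nonzero ID N maps to index N - 1 in DeclsLoaded/DeclOffsets, and index 0
+// holds the translation unit (see ReadDeclRecord in PCHReaderDecl.cpp).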
+
+/// \brief Resolve the offset of a statement into the statement itself.
+///
+/// This operation will read a new statement from the external
+/// source each time it is called, and is meant to be used via a
+/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
+Stmt *PCHReader::GetDeclStmt(uint64_t Offset) {
+  // Since we know that this statement is part of a decl, make sure to use the
+ // decl cursor to read it.
+ DeclsCursor.JumpToBit(Offset);
+ return ReadStmt(DeclsCursor);
+}
+
+bool PCHReader::ReadDeclsLexicallyInContext(DeclContext *DC,
+ llvm::SmallVectorImpl<pch::DeclID> &Decls) {
+ assert(DC->hasExternalLexicalStorage() &&
+ "DeclContext has no lexical decls in storage");
+ uint64_t Offset = DeclContextOffsets[DC].first;
+ assert(Offset && "DeclContext has no lexical decls in storage");
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this context.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ // Load the record containing all of the declarations lexically in
+ // this context.
+ DeclsCursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == pch::DECL_CONTEXT_LEXICAL && "Expected lexical block");
+
+ // Load all of the declaration IDs
+ Decls.clear();
+ Decls.insert(Decls.end(), Record.begin(), Record.end());
+ ++NumLexicalDeclContextsRead;
+ return false;
+}
+
+bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC,
+ llvm::SmallVectorImpl<VisibleDeclaration> &Decls) {
+ assert(DC->hasExternalVisibleStorage() &&
+ "DeclContext has no visible decls in storage");
+ uint64_t Offset = DeclContextOffsets[DC].second;
+ assert(Offset && "DeclContext has no visible decls in storage");
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this context.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ // Load the record containing all of the declarations visible in
+ // this context.
+ DeclsCursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == pch::DECL_CONTEXT_VISIBLE && "Expected visible block");
+ if (Record.size() == 0)
+ return false;
+
+ Decls.clear();
+
+ unsigned Idx = 0;
+ while (Idx < Record.size()) {
+ Decls.push_back(VisibleDeclaration());
+ Decls.back().Name = ReadDeclarationName(Record, Idx);
+
+ unsigned Size = Record[Idx++];
+ llvm::SmallVector<unsigned, 4> &LoadedDecls = Decls.back().Declarations;
+ LoadedDecls.reserve(Size);
+ for (unsigned I = 0; I < Size; ++I)
+ LoadedDecls.push_back(Record[Idx++]);
+ }
+
+ ++NumVisibleDeclContextsRead;
+ return false;
+}
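+
+// For illustration, a context whose visible names map (hypothetically) as
+// "foo" -> {12, 47} and "bar" -> {13} is stored as the flat record
+//
+//   [name "foo"] [2] [12] [47] [name "bar"] [1] [13]
+//
+// where each [name ...] is whatever ReadDeclarationName consumes, followed
+// by a declaration count and that many declaration IDs.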
+
+void PCHReader::StartTranslationUnit(ASTConsumer *Consumer) {
+ this->Consumer = Consumer;
+
+ if (!Consumer)
+ return;
+
+ for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) {
+ Decl *D = GetDecl(ExternalDefinitions[I]);
+ DeclGroupRef DG(D);
+ Consumer->HandleTopLevelDecl(DG);
+ }
+
+ for (unsigned I = 0, N = InterestingDecls.size(); I != N; ++I) {
+ DeclGroupRef DG(InterestingDecls[I]);
+ Consumer->HandleTopLevelDecl(DG);
+ }
+}
+
+void PCHReader::PrintStats() {
+ std::fprintf(stderr, "*** PCH Statistics:\n");
+
+ unsigned NumTypesLoaded
+ = TypesLoaded.size() - std::count(TypesLoaded.begin(), TypesLoaded.end(),
+ (Type *)0);
+ unsigned NumDeclsLoaded
+ = DeclsLoaded.size() - std::count(DeclsLoaded.begin(), DeclsLoaded.end(),
+ (Decl *)0);
+ unsigned NumIdentifiersLoaded
+ = IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
+ IdentifiersLoaded.end(),
+ (IdentifierInfo *)0);
+ unsigned NumSelectorsLoaded
+ = SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
+ SelectorsLoaded.end(),
+ Selector());
+
+ std::fprintf(stderr, " %u stat cache hits\n", NumStatHits);
+ std::fprintf(stderr, " %u stat cache misses\n", NumStatMisses);
+ if (TotalNumSLocEntries)
+ std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
+ NumSLocEntriesRead, TotalNumSLocEntries,
+ ((float)NumSLocEntriesRead/TotalNumSLocEntries * 100));
+ if (!TypesLoaded.empty())
+ std::fprintf(stderr, " %u/%u types read (%f%%)\n",
+ NumTypesLoaded, (unsigned)TypesLoaded.size(),
+ ((float)NumTypesLoaded/TypesLoaded.size() * 100));
+ if (!DeclsLoaded.empty())
+ std::fprintf(stderr, " %u/%u declarations read (%f%%)\n",
+ NumDeclsLoaded, (unsigned)DeclsLoaded.size(),
+ ((float)NumDeclsLoaded/DeclsLoaded.size() * 100));
+ if (!IdentifiersLoaded.empty())
+ std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n",
+ NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(),
+ ((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100));
+ if (TotalNumSelectors)
+ std::fprintf(stderr, " %u/%u selectors read (%f%%)\n",
+ NumSelectorsLoaded, TotalNumSelectors,
+ ((float)NumSelectorsLoaded/TotalNumSelectors * 100));
+ if (TotalNumStatements)
+ std::fprintf(stderr, " %u/%u statements read (%f%%)\n",
+ NumStatementsRead, TotalNumStatements,
+ ((float)NumStatementsRead/TotalNumStatements * 100));
+ if (TotalNumMacros)
+ std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
+ NumMacrosRead, TotalNumMacros,
+ ((float)NumMacrosRead/TotalNumMacros * 100));
+ if (TotalLexicalDeclContexts)
+ std::fprintf(stderr, " %u/%u lexical declcontexts read (%f%%)\n",
+ NumLexicalDeclContextsRead, TotalLexicalDeclContexts,
+ ((float)NumLexicalDeclContextsRead/TotalLexicalDeclContexts
+ * 100));
+ if (TotalVisibleDeclContexts)
+ std::fprintf(stderr, " %u/%u visible declcontexts read (%f%%)\n",
+ NumVisibleDeclContextsRead, TotalVisibleDeclContexts,
+ ((float)NumVisibleDeclContextsRead/TotalVisibleDeclContexts
+ * 100));
+ if (TotalSelectorsInMethodPool) {
+ std::fprintf(stderr, " %u/%u method pool entries read (%f%%)\n",
+ NumMethodPoolSelectorsRead, TotalSelectorsInMethodPool,
+ ((float)NumMethodPoolSelectorsRead/TotalSelectorsInMethodPool
+ * 100));
+ std::fprintf(stderr, " %u method pool misses\n", NumMethodPoolMisses);
+ }
+ std::fprintf(stderr, "\n");
+}
+
+void PCHReader::InitializeSema(Sema &S) {
+ SemaObj = &S;
+ S.ExternalSource = this;
+
+  // Make sure any declarations that were deserialized "too early"
+ // still get added to the identifier's declaration chains.
+ for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
+ SemaObj->TUScope->AddDecl(Action::DeclPtrTy::make(PreloadedDecls[I]));
+ SemaObj->IdResolver.AddDecl(PreloadedDecls[I]);
+ }
+ PreloadedDecls.clear();
+
+ // If there were any tentative definitions, deserialize them and add
+ // them to Sema's table of tentative definitions.
+ for (unsigned I = 0, N = TentativeDefinitions.size(); I != N; ++I) {
+ VarDecl *Var = cast<VarDecl>(GetDecl(TentativeDefinitions[I]));
+ SemaObj->TentativeDefinitions[Var->getDeclName()] = Var;
+ }
+
+ // If there were any locally-scoped external declarations,
+ // deserialize them and add them to Sema's table of locally-scoped
+ // external declarations.
+ for (unsigned I = 0, N = LocallyScopedExternalDecls.size(); I != N; ++I) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(LocallyScopedExternalDecls[I]));
+ SemaObj->LocallyScopedExternalDecls[D->getDeclName()] = D;
+ }
+
+ // If there were any ext_vector type declarations, deserialize them
+ // and add them to Sema's vector of such declarations.
+ for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I)
+ SemaObj->ExtVectorDecls.push_back(
+ cast<TypedefDecl>(GetDecl(ExtVectorDecls[I])));
+
+ // If there were any Objective-C category implementations,
+ // deserialize them and add them to Sema's vector of such
+ // definitions.
+ for (unsigned I = 0, N = ObjCCategoryImpls.size(); I != N; ++I)
+ SemaObj->ObjCCategoryImpls.push_back(
+ cast<ObjCCategoryImplDecl>(GetDecl(ObjCCategoryImpls[I])));
+}
+
+IdentifierInfo* PCHReader::get(const char *NameStart, const char *NameEnd) {
+ // Try to find this name within our on-disk hash table
+ PCHIdentifierLookupTable *IdTable
+ = (PCHIdentifierLookupTable *)IdentifierLookupTable;
+ std::pair<const char*, unsigned> Key(NameStart, NameEnd - NameStart);
+ PCHIdentifierLookupTable::iterator Pos = IdTable->find(Key);
+ if (Pos == IdTable->end())
+ return 0;
+
+ // Dereferencing the iterator has the effect of building the
+ // IdentifierInfo node and populating it with the various
+ // declarations it needs.
+ return *Pos;
+}
+
+std::pair<ObjCMethodList, ObjCMethodList>
+PCHReader::ReadMethodPool(Selector Sel) {
+ if (!MethodPoolLookupTable)
+ return std::pair<ObjCMethodList, ObjCMethodList>();
+
+ // Try to find this selector within our on-disk hash table.
+ PCHMethodPoolLookupTable *PoolTable
+ = (PCHMethodPoolLookupTable*)MethodPoolLookupTable;
+ PCHMethodPoolLookupTable::iterator Pos = PoolTable->find(Sel);
+ if (Pos == PoolTable->end()) {
+ ++NumMethodPoolMisses;
+    return std::pair<ObjCMethodList, ObjCMethodList>();
+ }
+
+ ++NumMethodPoolSelectorsRead;
+ return *Pos;
+}
+
+void PCHReader::SetIdentifierInfo(unsigned ID, IdentifierInfo *II) {
+ assert(ID && "Non-zero identifier ID required");
+ assert(ID <= IdentifiersLoaded.size() && "identifier ID out of range");
+ IdentifiersLoaded[ID - 1] = II;
+}
+
+IdentifierInfo *PCHReader::DecodeIdentifierInfo(unsigned ID) {
+ if (ID == 0)
+ return 0;
+
+ if (!IdentifierTableData || IdentifiersLoaded.empty()) {
+ Error("no identifier table in PCH file");
+ return 0;
+ }
+
+ if (!IdentifiersLoaded[ID - 1]) {
+ uint32_t Offset = IdentifierOffsets[ID - 1];
+ const char *Str = IdentifierTableData + Offset;
+
+ // All of the strings in the PCH file are preceded by a 16-bit
+ // length. Extract that 16-bit length to avoid having to execute
+ // strlen().
+ const char *StrLenPtr = Str - 2;
+ unsigned StrLen = (((unsigned) StrLenPtr[0])
+ | (((unsigned) StrLenPtr[1]) << 8)) - 1;
+ IdentifiersLoaded[ID - 1]
+ = &PP.getIdentifierTable().get(Str, Str + StrLen);
+ }
+
+ return IdentifiersLoaded[ID - 1];
+}
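+
+// For illustration, assuming the identifier "foo" is stored at Offset, the
+// bytes around IdentifierTableData + Offset look like
+//
+//   [len_lo] [len_hi] ['f'] ['o'] ['o'] ['\0'] ...
+//                     ^ Str
+//
+// where the stored 16-bit length appears to include the trailing NUL, hence
+// the "- 1" when computing StrLen above.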
+
+void PCHReader::ReadSLocEntry(unsigned ID) {
+ ReadSLocEntryRecord(ID);
+}
+
+Selector PCHReader::DecodeSelector(unsigned ID) {
+ if (ID == 0)
+ return Selector();
+
+ if (!MethodPoolLookupTableData)
+ return Selector();
+
+ if (ID > TotalNumSelectors) {
+ Error("selector ID out of range in PCH file");
+ return Selector();
+ }
+
+ unsigned Index = ID - 1;
+ if (SelectorsLoaded[Index].getAsOpaquePtr() == 0) {
+ // Load this selector from the selector table.
+ // FIXME: endianness portability issues with SelectorOffsets table
+ PCHMethodPoolLookupTrait Trait(*this);
+ SelectorsLoaded[Index]
+ = Trait.ReadKey(MethodPoolLookupTableData + SelectorOffsets[Index], 0);
+ }
+
+ return SelectorsLoaded[Index];
+}
+
+DeclarationName
+PCHReader::ReadDeclarationName(const RecordData &Record, unsigned &Idx) {
+ DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ return DeclarationName(GetIdentifierInfo(Record, Idx));
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return DeclarationName(GetSelector(Record, Idx));
+
+ case DeclarationName::CXXConstructorName:
+ return Context->DeclarationNames.getCXXConstructorName(
+ GetType(Record[Idx++]));
+
+ case DeclarationName::CXXDestructorName:
+ return Context->DeclarationNames.getCXXDestructorName(
+ GetType(Record[Idx++]));
+
+ case DeclarationName::CXXConversionFunctionName:
+ return Context->DeclarationNames.getCXXConversionFunctionName(
+ GetType(Record[Idx++]));
+
+ case DeclarationName::CXXOperatorName:
+ return Context->DeclarationNames.getCXXOperatorName(
+ (OverloadedOperatorKind)Record[Idx++]);
+
+ case DeclarationName::CXXUsingDirective:
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ // Required to silence GCC warning
+ return DeclarationName();
+}
+
+/// \brief Read an integral value
+llvm::APInt PCHReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
+ unsigned BitWidth = Record[Idx++];
+ unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
+ llvm::APInt Result(BitWidth, NumWords, &Record[Idx]);
+ Idx += NumWords;
+ return Result;
+}
+
+/// \brief Read a signed integral value
+llvm::APSInt PCHReader::ReadAPSInt(const RecordData &Record, unsigned &Idx) {
+ bool isUnsigned = Record[Idx++];
+ return llvm::APSInt(ReadAPInt(Record, Idx), isUnsigned);
+}
+
+/// \brief Read a floating-point value
+llvm::APFloat PCHReader::ReadAPFloat(const RecordData &Record, unsigned &Idx) {
+ return llvm::APFloat(ReadAPInt(Record, Idx));
+}
+
+/// \brief Read a string
+std::string PCHReader::ReadString(const RecordData &Record, unsigned &Idx) {
+ unsigned Len = Record[Idx++];
+ std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
+ Idx += Len;
+ return Result;
+}
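+
+// For illustration, given a hypothetical record tail at Idx of
+//
+//   { 32, 42, 3, 'p', 'c', 'h' }
+//
+// ReadAPInt consumes { 32, 42 } and yields a 32-bit APInt with the value 42
+// (llvm::APInt::getNumWords(32) == 1 word), after which ReadString consumes
+// { 3, 'p', 'c', 'h' } and yields the std::string "pch", leaving Idx just
+// past the last element read.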
+
+DiagnosticBuilder PCHReader::Diag(unsigned DiagID) {
+ return Diag(SourceLocation(), DiagID);
+}
+
+DiagnosticBuilder PCHReader::Diag(SourceLocation Loc, unsigned DiagID) {
+ return PP.getDiagnostics().Report(FullSourceLoc(Loc,
+ PP.getSourceManager()),
+ DiagID);
+}
+
+/// \brief Retrieve the identifier table associated with the
+/// preprocessor.
+IdentifierTable &PCHReader::getIdentifierTable() {
+ return PP.getIdentifierTable();
+}
+
+/// \brief Record that the given ID maps to the given switch-case
+/// statement.
+void PCHReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
+ assert(SwitchCaseStmts[ID] == 0 && "Already have a SwitchCase with this ID");
+ SwitchCaseStmts[ID] = SC;
+}
+
+/// \brief Retrieve the switch-case statement with the given ID.
+SwitchCase *PCHReader::getSwitchCaseWithID(unsigned ID) {
+ assert(SwitchCaseStmts[ID] != 0 && "No SwitchCase with this ID");
+ return SwitchCaseStmts[ID];
+}
+
+/// \brief Record that the given label statement has been
+/// deserialized and has the given ID.
+void PCHReader::RecordLabelStmt(LabelStmt *S, unsigned ID) {
+ assert(LabelStmts.find(ID) == LabelStmts.end() &&
+ "Deserialized label twice");
+ LabelStmts[ID] = S;
+
+ // If we've already seen any goto statements that point to this
+ // label, resolve them now.
+ typedef std::multimap<unsigned, GotoStmt *>::iterator GotoIter;
+ std::pair<GotoIter, GotoIter> Gotos = UnresolvedGotoStmts.equal_range(ID);
+ for (GotoIter Goto = Gotos.first; Goto != Gotos.second; ++Goto)
+ Goto->second->setLabel(S);
+ UnresolvedGotoStmts.erase(Gotos.first, Gotos.second);
+
+ // If we've already seen any address-label statements that point to
+ // this label, resolve them now.
+ typedef std::multimap<unsigned, AddrLabelExpr *>::iterator AddrLabelIter;
+ std::pair<AddrLabelIter, AddrLabelIter> AddrLabels
+ = UnresolvedAddrLabelExprs.equal_range(ID);
+ for (AddrLabelIter AddrLabel = AddrLabels.first;
+ AddrLabel != AddrLabels.second; ++AddrLabel)
+ AddrLabel->second->setLabel(S);
+ UnresolvedAddrLabelExprs.erase(AddrLabels.first, AddrLabels.second);
+}
+
+/// \brief Set the label of the given statement to the label
+/// identified by ID.
+///
+/// Depending on the order in which the label and other statements
+/// referencing that label occur, this operation may complete
+/// immediately (updating the statement) or it may queue the
+/// statement to be back-patched later.
+void PCHReader::SetLabelOf(GotoStmt *S, unsigned ID) {
+ std::map<unsigned, LabelStmt *>::iterator Label = LabelStmts.find(ID);
+ if (Label != LabelStmts.end()) {
+ // We've already seen this label, so set the label of the goto and
+ // we're done.
+ S->setLabel(Label->second);
+ } else {
+ // We haven't seen this label yet, so add this goto to the set of
+ // unresolved goto statements.
+ UnresolvedGotoStmts.insert(std::make_pair(ID, S));
+ }
+}
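+
+// For illustration, a forward jump such as
+//
+//   goto done;   // GotoStmt deserialized before its label
+//   ...
+//   done: ;      // LabelStmt deserialized later
+//
+// cannot be resolved when SetLabelOf runs, so the goto is queued in
+// UnresolvedGotoStmts; RecordLabelStmt back-patches it (and erases the
+// queue entry) once the matching label ID is seen.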
+
+/// \brief Set the label of the given expression to the label
+/// identified by ID.
+///
+/// Depending on the order in which the label and other statements
+/// referencing that label occur, this operation may complete
+/// immediately (updating the statement) or it may queue the
+/// statement to be back-patched later.
+void PCHReader::SetLabelOf(AddrLabelExpr *S, unsigned ID) {
+ std::map<unsigned, LabelStmt *>::iterator Label = LabelStmts.find(ID);
+ if (Label != LabelStmts.end()) {
+ // We've already seen this label, so set the label of the
+ // label-address expression and we're done.
+ S->setLabel(Label->second);
+ } else {
+ // We haven't seen this label yet, so add this label-address
+ // expression to the set of unresolved label-address expressions.
+ UnresolvedAddrLabelExprs.insert(std::make_pair(ID, S));
+ }
+}
diff --git a/lib/Frontend/PCHReaderDecl.cpp b/lib/Frontend/PCHReaderDecl.cpp
new file mode 100644
index 0000000..6856623
--- /dev/null
+++ b/lib/Frontend/PCHReaderDecl.cpp
@@ -0,0 +1,712 @@
+//===--- PCHReaderDecl.cpp - Decl Deserialization ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PCHReader::ReadDeclRecord method, which is the
+// entrypoint for loading a decl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHReader.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Expr.h"
+using namespace clang;
+
+
+//===----------------------------------------------------------------------===//
+// Declaration deserialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class PCHDeclReader : public DeclVisitor<PCHDeclReader, void> {
+ PCHReader &Reader;
+ const PCHReader::RecordData &Record;
+ unsigned &Idx;
+
+ public:
+ PCHDeclReader(PCHReader &Reader, const PCHReader::RecordData &Record,
+ unsigned &Idx)
+ : Reader(Reader), Record(Record), Idx(Idx) { }
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
+ void VisitNamedDecl(NamedDecl *ND);
+ void VisitTypeDecl(TypeDecl *TD);
+ void VisitTypedefDecl(TypedefDecl *TD);
+ void VisitTagDecl(TagDecl *TD);
+ void VisitEnumDecl(EnumDecl *ED);
+ void VisitRecordDecl(RecordDecl *RD);
+ void VisitValueDecl(ValueDecl *VD);
+ void VisitEnumConstantDecl(EnumConstantDecl *ECD);
+ void VisitFunctionDecl(FunctionDecl *FD);
+ void VisitFieldDecl(FieldDecl *FD);
+ void VisitVarDecl(VarDecl *VD);
+ void VisitImplicitParamDecl(ImplicitParamDecl *PD);
+ void VisitParmVarDecl(ParmVarDecl *PD);
+ void VisitOriginalParmVarDecl(OriginalParmVarDecl *PD);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitBlockDecl(BlockDecl *BD);
+ std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCClassDecl(ObjCClassDecl *D);
+ void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ };
+}
+
+void PCHDeclReader::VisitDecl(Decl *D) {
+ D->setDeclContext(cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++])));
+ D->setLexicalDeclContext(
+ cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++])));
+ D->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setInvalidDecl(Record[Idx++]);
+ if (Record[Idx++])
+ D->addAttr(Reader.ReadAttributes());
+ D->setImplicit(Record[Idx++]);
+ D->setAccess((AccessSpecifier)Record[Idx++]);
+}
+
+void PCHDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
+ VisitDecl(TU);
+}
+
+void PCHDeclReader::VisitNamedDecl(NamedDecl *ND) {
+ VisitDecl(ND);
+ ND->setDeclName(Reader.ReadDeclarationName(Record, Idx));
+}
+
+void PCHDeclReader::VisitTypeDecl(TypeDecl *TD) {
+ VisitNamedDecl(TD);
+ TD->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr());
+}
+
+void PCHDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
+ // Note that we cannot use VisitTypeDecl here, because we need to
+ // set the underlying type of the typedef *before* we try to read
+ // the type associated with the TypedefDecl.
+ VisitNamedDecl(TD);
+ TD->setUnderlyingType(Reader.GetType(Record[Idx + 1]));
+ TD->setTypeForDecl(Reader.GetType(Record[Idx]).getTypePtr());
+ Idx += 2;
+}
+
+void PCHDeclReader::VisitTagDecl(TagDecl *TD) {
+ VisitTypeDecl(TD);
+ TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
+ TD->setDefinition(Record[Idx++]);
+ TD->setTypedefForAnonDecl(
+ cast_or_null<TypedefDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+void PCHDeclReader::VisitEnumDecl(EnumDecl *ED) {
+ VisitTagDecl(ED);
+ ED->setIntegerType(Reader.GetType(Record[Idx++]));
+ // FIXME: C++ InstantiatedFrom
+}
+
+void PCHDeclReader::VisitRecordDecl(RecordDecl *RD) {
+ VisitTagDecl(RD);
+ RD->setHasFlexibleArrayMember(Record[Idx++]);
+ RD->setAnonymousStructOrUnion(Record[Idx++]);
+}
+
+void PCHDeclReader::VisitValueDecl(ValueDecl *VD) {
+ VisitNamedDecl(VD);
+ VD->setType(Reader.GetType(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
+ VisitValueDecl(ECD);
+ if (Record[Idx++])
+ ECD->setInitExpr(Reader.ReadDeclExpr());
+ ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
+}
+
+void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
+ VisitValueDecl(FD);
+ if (Record[Idx++])
+ FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo());
+ FD->setPreviousDeclaration(
+ cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])));
+ FD->setStorageClass((FunctionDecl::StorageClass)Record[Idx++]);
+ FD->setInline(Record[Idx++]);
+ FD->setC99InlineDefinition(Record[Idx++]);
+ FD->setVirtualAsWritten(Record[Idx++]);
+ FD->setPure(Record[Idx++]);
+ FD->setHasInheritedPrototype(Record[Idx++]);
+ FD->setHasWrittenPrototype(Record[Idx++]);
+ FD->setDeleted(Record[Idx++]);
+ FD->setTypeSpecStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ // FIXME: C++ TemplateOrInstantiation
+ unsigned NumParams = Record[Idx++];
+ llvm::SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
+ FD->setParams(*Reader.getContext(), Params.data(), NumParams);
+}
+
+void PCHDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ VisitNamedDecl(MD);
+ if (Record[Idx++]) {
+ // In practice, this won't be executed (since method definitions
+ // don't occur in header files).
+ MD->setBody(Reader.ReadDeclStmt());
+ MD->setSelfDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
+ MD->setCmdDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++])));
+ }
+ MD->setInstanceMethod(Record[Idx++]);
+ MD->setVariadic(Record[Idx++]);
+ MD->setSynthesized(Record[Idx++]);
+ MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record[Idx++]);
+ MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
+ MD->setResultType(Reader.GetType(Record[Idx++]));
+ MD->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ unsigned NumParams = Record[Idx++];
+ llvm::SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
+ MD->setMethodParams(*Reader.getContext(), Params.data(), NumParams);
+}
+
+void PCHDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
+ VisitNamedDecl(CD);
+ CD->setAtEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
+ VisitObjCContainerDecl(ID);
+ ID->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr());
+ ID->setSuperClass(cast_or_null<ObjCInterfaceDecl>
+ (Reader.GetDecl(Record[Idx++])));
+ unsigned NumProtocols = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl *, 16> Protocols;
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
+ ID->setProtocolList(Protocols.data(), NumProtocols, *Reader.getContext());
+ unsigned NumIvars = Record[Idx++];
+ llvm::SmallVector<ObjCIvarDecl *, 16> IVars;
+ IVars.reserve(NumIvars);
+ for (unsigned I = 0; I != NumIvars; ++I)
+ IVars.push_back(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
+ ID->setIVarList(IVars.data(), NumIvars, *Reader.getContext());
+ ID->setCategoryList(
+ cast_or_null<ObjCCategoryDecl>(Reader.GetDecl(Record[Idx++])));
+ ID->setForwardDecl(Record[Idx++]);
+ ID->setImplicitInterfaceDecl(Record[Idx++]);
+ ID->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ID->setSuperClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ ID->setAtEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
+ VisitFieldDecl(IVD);
+ IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record[Idx++]);
+}
+
+void PCHDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
+ VisitObjCContainerDecl(PD);
+ PD->setForwardDecl(Record[Idx++]);
+ PD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ unsigned NumProtoRefs = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
+ PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, *Reader.getContext());
+}
+
+void PCHDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) {
+ VisitFieldDecl(FD);
+}
+
+void PCHDeclReader::VisitObjCClassDecl(ObjCClassDecl *CD) {
+ VisitDecl(CD);
+ unsigned NumClassRefs = Record[Idx++];
+ llvm::SmallVector<ObjCInterfaceDecl *, 16> ClassRefs;
+ ClassRefs.reserve(NumClassRefs);
+ for (unsigned I = 0; I != NumClassRefs; ++I)
+ ClassRefs.push_back(cast<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+ CD->setClassList(*Reader.getContext(), ClassRefs.data(), NumClassRefs);
+}
+
+void PCHDeclReader::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *FPD) {
+ VisitDecl(FPD);
+ unsigned NumProtoRefs = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
+ FPD->setProtocolList(ProtoRefs.data(), NumProtoRefs, *Reader.getContext());
+}
+
+void PCHDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
+ VisitObjCContainerDecl(CD);
+ CD->setClassInterface(cast<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+ unsigned NumProtoRefs = Record[Idx++];
+ llvm::SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
+ CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, *Reader.getContext());
+  CD->setNextClassCategory(
+      cast_or_null<ObjCCategoryDecl>(Reader.GetDecl(Record[Idx++])));
+ CD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
+ VisitNamedDecl(CAD);
+ CAD->setClassInterface(cast<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+void PCHDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ D->setType(Reader.GetType(Record[Idx++]));
+ // FIXME: stable encoding
+ D->setPropertyAttributes(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ // FIXME: stable encoding
+ D->setPropertyImplementation(
+ (ObjCPropertyDecl::PropertyControl)Record[Idx++]);
+ D->setGetterName(Reader.ReadDeclarationName(Record, Idx).getObjCSelector());
+ D->setSetterName(Reader.ReadDeclarationName(Record, Idx).getObjCSelector());
+ D->setGetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setSetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+void PCHDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitNamedDecl(D);
+ D->setClassInterface(
+ cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setIdentifier(Reader.GetIdentifierInfo(Record, Idx));
+}
+
+void PCHDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setSuperClass(
+ cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+
+void PCHDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ D->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setPropertyDecl(
+ cast_or_null<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
+void PCHDeclReader::VisitFieldDecl(FieldDecl *FD) {
+ VisitValueDecl(FD);
+ FD->setMutable(Record[Idx++]);
+ if (Record[Idx++])
+ FD->setBitWidth(Reader.ReadDeclExpr());
+}
+
+void PCHDeclReader::VisitVarDecl(VarDecl *VD) {
+ VisitValueDecl(VD);
+ VD->setStorageClass((VarDecl::StorageClass)Record[Idx++]);
+ VD->setThreadSpecified(Record[Idx++]);
+ VD->setCXXDirectInitializer(Record[Idx++]);
+ VD->setDeclaredInCondition(Record[Idx++]);
+ VD->setPreviousDeclaration(
+ cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++])));
+ VD->setTypeSpecStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ if (Record[Idx++])
+ VD->setInit(*Reader.getContext(), Reader.ReadDeclExpr());
+}
+
+void PCHDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
+ VisitVarDecl(PD);
+}
+
+void PCHDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
+ VisitVarDecl(PD);
+ PD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
+ // FIXME: default argument (C++ only)
+}
+
+void PCHDeclReader::VisitOriginalParmVarDecl(OriginalParmVarDecl *PD) {
+ VisitParmVarDecl(PD);
+ PD->setOriginalType(Reader.GetType(Record[Idx++]));
+}
+
+void PCHDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
+ VisitDecl(AD);
+ AD->setAsmString(cast<StringLiteral>(Reader.ReadDeclExpr()));
+}
+
+void PCHDeclReader::VisitBlockDecl(BlockDecl *BD) {
+ VisitDecl(BD);
+ BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadDeclStmt()));
+ unsigned NumParams = Record[Idx++];
+ llvm::SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
+ BD->setParams(*Reader.getContext(), Params.data(), NumParams);
+}
+
+std::pair<uint64_t, uint64_t>
+PCHDeclReader::VisitDeclContext(DeclContext *DC) {
+ uint64_t LexicalOffset = Record[Idx++];
+ uint64_t VisibleOffset = Record[Idx++];
+ return std::make_pair(LexicalOffset, VisibleOffset);
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Reading
+//===----------------------------------------------------------------------===//
+
+/// \brief Reads attributes from the current stream position.
+Attr *PCHReader::ReadAttributes() {
+ unsigned Code = DeclsCursor.ReadCode();
+ assert(Code == llvm::bitc::UNABBREV_RECORD &&
+         "Expected unabbreviated record");
+  (void)Code;
+
+ RecordData Record;
+ unsigned Idx = 0;
+ unsigned RecCode = DeclsCursor.ReadRecord(Code, Record);
+ assert(RecCode == pch::DECL_ATTR && "Expected attribute record");
+ (void)RecCode;
+
+#define SIMPLE_ATTR(Name) \
+ case Attr::Name: \
+ New = ::new (*Context) Name##Attr(); \
+ break
+
+#define STRING_ATTR(Name) \
+ case Attr::Name: \
+ New = ::new (*Context) Name##Attr(ReadString(Record, Idx)); \
+ break
+
+#define UNSIGNED_ATTR(Name) \
+ case Attr::Name: \
+ New = ::new (*Context) Name##Attr(Record[Idx++]); \
+ break
+
+ Attr *Attrs = 0;
+ while (Idx < Record.size()) {
+ Attr *New = 0;
+ Attr::Kind Kind = (Attr::Kind)Record[Idx++];
+ bool IsInherited = Record[Idx++];
+
+ switch (Kind) {
+ STRING_ATTR(Alias);
+ UNSIGNED_ATTR(Aligned);
+ SIMPLE_ATTR(AlwaysInline);
+ SIMPLE_ATTR(AnalyzerNoReturn);
+ STRING_ATTR(Annotate);
+ STRING_ATTR(AsmLabel);
+
+ case Attr::Blocks:
+ New = ::new (*Context) BlocksAttr(
+ (BlocksAttr::BlocksAttrTypes)Record[Idx++]);
+ break;
+
+ case Attr::Cleanup:
+ New = ::new (*Context) CleanupAttr(
+ cast<FunctionDecl>(GetDecl(Record[Idx++])));
+ break;
+
+ SIMPLE_ATTR(Const);
+ UNSIGNED_ATTR(Constructor);
+ SIMPLE_ATTR(DLLExport);
+ SIMPLE_ATTR(DLLImport);
+ SIMPLE_ATTR(Deprecated);
+ UNSIGNED_ATTR(Destructor);
+ SIMPLE_ATTR(FastCall);
+
+ case Attr::Format: {
+ std::string Type = ReadString(Record, Idx);
+ unsigned FormatIdx = Record[Idx++];
+ unsigned FirstArg = Record[Idx++];
+ New = ::new (*Context) FormatAttr(Type, FormatIdx, FirstArg);
+ break;
+ }
+
+ case Attr::FormatArg: {
+ unsigned FormatIdx = Record[Idx++];
+ New = ::new (*Context) FormatArgAttr(FormatIdx);
+ break;
+ }
+
+ case Attr::Sentinel: {
+ int sentinel = Record[Idx++];
+ int nullPos = Record[Idx++];
+ New = ::new (*Context) SentinelAttr(sentinel, nullPos);
+ break;
+ }
+
+ SIMPLE_ATTR(GNUInline);
+
+ case Attr::IBOutletKind:
+ New = ::new (*Context) IBOutletAttr();
+ break;
+
+ SIMPLE_ATTR(NoReturn);
+ SIMPLE_ATTR(NoThrow);
+ SIMPLE_ATTR(Nodebug);
+ SIMPLE_ATTR(Noinline);
+
+ case Attr::NonNull: {
+ unsigned Size = Record[Idx++];
+ llvm::SmallVector<unsigned, 16> ArgNums;
+ ArgNums.insert(ArgNums.end(), &Record[Idx], &Record[Idx] + Size);
+ Idx += Size;
+ New = ::new (*Context) NonNullAttr(ArgNums.data(), Size);
+ break;
+ }
+
+ SIMPLE_ATTR(ObjCException);
+ SIMPLE_ATTR(ObjCNSObject);
+ SIMPLE_ATTR(CFReturnsRetained);
+ SIMPLE_ATTR(NSReturnsRetained);
+ SIMPLE_ATTR(Overloadable);
+ UNSIGNED_ATTR(Packed);
+ SIMPLE_ATTR(Pure);
+ UNSIGNED_ATTR(Regparm);
+ STRING_ATTR(Section);
+ SIMPLE_ATTR(StdCall);
+ SIMPLE_ATTR(TransparentUnion);
+ SIMPLE_ATTR(Unavailable);
+ SIMPLE_ATTR(Unused);
+ SIMPLE_ATTR(Used);
+
+ case Attr::Visibility:
+ New = ::new (*Context) VisibilityAttr(
+ (VisibilityAttr::VisibilityTypes)Record[Idx++]);
+ break;
+
+ SIMPLE_ATTR(WarnUnusedResult);
+ SIMPLE_ATTR(Weak);
+ SIMPLE_ATTR(WeakImport);
+ }
+
+ assert(New && "Unable to decode attribute?");
+ New->setInherited(IsInherited);
+ New->setNext(Attrs);
+ Attrs = New;
+ }
+#undef UNSIGNED_ATTR
+#undef STRING_ATTR
+#undef SIMPLE_ATTR
+
+ // The list of attributes was built backwards. Reverse the list
+ // before returning it.
+ Attr *PrevAttr = 0, *NextAttr = 0;
+ while (Attrs) {
+ NextAttr = Attrs->getNext();
+ Attrs->setNext(PrevAttr);
+ PrevAttr = Attrs;
+ Attrs = NextAttr;
+ }
+
+ return PrevAttr;
+}
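+
+// For illustration, if a record encodes attributes A, B, C in that order,
+// the loop above builds the list C -> B -> A (each attribute is pushed onto
+// the front); the final reversal restores the original order A -> B -> C.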
+
+//===----------------------------------------------------------------------===//
+// PCHReader Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Note that we have loaded the declaration with the given
+/// Index.
+///
+/// This routine notes that this declaration has already been loaded,
+/// so that future GetDecl calls will return this declaration rather
+/// than trying to load a new declaration.
+inline void PCHReader::LoadedDecl(unsigned Index, Decl *D) {
+ assert(!DeclsLoaded[Index] && "Decl loaded twice?");
+ DeclsLoaded[Index] = D;
+}
+
+
+/// \brief Determine whether the consumer will be interested in seeing
+/// this declaration (via HandleTopLevelDecl).
+///
+/// This routine should return true for anything that might affect
+/// code generation, e.g., inline function definitions, Objective-C
+/// declarations with metadata, etc.
+static bool isConsumerInterestedIn(Decl *D) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ return Var->isFileVarDecl() && Var->getInit();
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
+ return Func->isThisDeclarationADefinition();
+ return isa<ObjCProtocolDecl>(D);
+}
+
+/// \brief Read the declaration at the given offset from the PCH file.
+Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this declaration.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ DeclsCursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ unsigned Idx = 0;
+ PCHDeclReader Reader(*this, Record, Idx);
+
+ Decl *D = 0;
+ switch ((pch::DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
+ case pch::DECL_ATTR:
+ case pch::DECL_CONTEXT_LEXICAL:
+ case pch::DECL_CONTEXT_VISIBLE:
+ assert(false && "Record cannot be de-serialized with ReadDeclRecord");
+ break;
+ case pch::DECL_TRANSLATION_UNIT:
+ assert(Index == 0 && "Translation unit must be at index 0");
+ D = Context->getTranslationUnitDecl();
+ break;
+ case pch::DECL_TYPEDEF:
+ D = TypedefDecl::Create(*Context, 0, SourceLocation(), 0, QualType());
+ break;
+ case pch::DECL_ENUM:
+ D = EnumDecl::Create(*Context, 0, SourceLocation(), 0, 0);
+ break;
+ case pch::DECL_RECORD:
+ D = RecordDecl::Create(*Context, TagDecl::TK_struct, 0, SourceLocation(),
+ 0, 0);
+ break;
+ case pch::DECL_ENUM_CONSTANT:
+ D = EnumConstantDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
+ 0, llvm::APSInt());
+ break;
+ case pch::DECL_FUNCTION:
+ D = FunctionDecl::Create(*Context, 0, SourceLocation(), DeclarationName(),
+ QualType());
+ break;
+ case pch::DECL_OBJC_METHOD:
+ D = ObjCMethodDecl::Create(*Context, SourceLocation(), SourceLocation(),
+ Selector(), QualType(), 0);
+ break;
+ case pch::DECL_OBJC_INTERFACE:
+ D = ObjCInterfaceDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
+ case pch::DECL_OBJC_IVAR:
+ D = ObjCIvarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
+ ObjCIvarDecl::None);
+ break;
+ case pch::DECL_OBJC_PROTOCOL:
+ D = ObjCProtocolDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
+ case pch::DECL_OBJC_AT_DEFS_FIELD:
+ D = ObjCAtDefsFieldDecl::Create(*Context, 0, SourceLocation(), 0,
+ QualType(), 0);
+ break;
+ case pch::DECL_OBJC_CLASS:
+ D = ObjCClassDecl::Create(*Context, 0, SourceLocation());
+ break;
+ case pch::DECL_OBJC_FORWARD_PROTOCOL:
+ D = ObjCForwardProtocolDecl::Create(*Context, 0, SourceLocation());
+ break;
+ case pch::DECL_OBJC_CATEGORY:
+ D = ObjCCategoryDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
+ case pch::DECL_OBJC_CATEGORY_IMPL:
+ D = ObjCCategoryImplDecl::Create(*Context, 0, SourceLocation(), 0, 0);
+ break;
+ case pch::DECL_OBJC_IMPLEMENTATION:
+ D = ObjCImplementationDecl::Create(*Context, 0, SourceLocation(), 0, 0);
+ break;
+ case pch::DECL_OBJC_COMPATIBLE_ALIAS:
+ D = ObjCCompatibleAliasDecl::Create(*Context, 0, SourceLocation(), 0, 0);
+ break;
+ case pch::DECL_OBJC_PROPERTY:
+ D = ObjCPropertyDecl::Create(*Context, 0, SourceLocation(), 0, QualType());
+ break;
+ case pch::DECL_OBJC_PROPERTY_IMPL:
+ D = ObjCPropertyImplDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(), 0,
+ ObjCPropertyImplDecl::Dynamic, 0);
+ break;
+ case pch::DECL_FIELD:
+ D = FieldDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), 0,
+ false);
+ break;
+ case pch::DECL_VAR:
+ D = VarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
+ VarDecl::None, SourceLocation());
+ break;
+
+ case pch::DECL_IMPLICIT_PARAM:
+ D = ImplicitParamDecl::Create(*Context, 0, SourceLocation(), 0, QualType());
+ break;
+
+ case pch::DECL_PARM_VAR:
+ D = ParmVarDecl::Create(*Context, 0, SourceLocation(), 0, QualType(),
+ VarDecl::None, 0);
+ break;
+ case pch::DECL_ORIGINAL_PARM_VAR:
+ D = OriginalParmVarDecl::Create(*Context, 0, SourceLocation(), 0,
+ QualType(), QualType(), VarDecl::None, 0);
+ break;
+ case pch::DECL_FILE_SCOPE_ASM:
+ D = FileScopeAsmDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
+ case pch::DECL_BLOCK:
+ D = BlockDecl::Create(*Context, 0, SourceLocation());
+ break;
+ }
+
+ assert(D && "Unknown declaration reading PCH file");
+ LoadedDecl(Index, D);
+ Reader.Visit(D);
+
+ // If this declaration is also a declaration context, get the
+ // offsets for its tables of lexical and visible declarations.
+ if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
+ if (Offsets.first || Offsets.second) {
+ DC->setHasExternalLexicalStorage(Offsets.first != 0);
+ DC->setHasExternalVisibleStorage(Offsets.second != 0);
+ DeclContextOffsets[DC] = Offsets;
+ }
+ }
+ assert(Idx == Record.size());
+
+ // If we have deserialized a declaration that has a definition the
+ // AST consumer might need to know about, notify the consumer
+ // about that definition now or queue it for later.
+ if (isConsumerInterestedIn(D)) {
+ if (Consumer) {
+ DeclGroupRef DG(D);
+ Consumer->HandleTopLevelDecl(DG);
+ } else {
+ InterestingDecls.push_back(D);
+ }
+ }
+
+ return D;
+}
+
diff --git a/lib/Frontend/PCHReaderStmt.cpp b/lib/Frontend/PCHReaderStmt.cpp
new file mode 100644
index 0000000..10059f6
--- /dev/null
+++ b/lib/Frontend/PCHReaderStmt.cpp
@@ -0,0 +1,1136 @@
+//===--- PCHReaderStmt.cpp - Stmt/Expr Deserialization ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Statement/expression deserialization. This implements the
+// PCHReader::ReadStmt method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHReader.h"
+#include "clang/AST/StmtVisitor.h"
+using namespace clang;
+
+namespace {
+ class PCHStmtReader : public StmtVisitor<PCHStmtReader, unsigned> {
+ PCHReader &Reader;
+ const PCHReader::RecordData &Record;
+ unsigned &Idx;
+ llvm::SmallVectorImpl<Stmt *> &StmtStack;
+
+ public:
+ PCHStmtReader(PCHReader &Reader, const PCHReader::RecordData &Record,
+ unsigned &Idx, llvm::SmallVectorImpl<Stmt *> &StmtStack)
+ : Reader(Reader), Record(Record), Idx(Idx), StmtStack(StmtStack) { }
+
+ /// \brief The number of record fields required for the Stmt class
+ /// itself.
+ static const unsigned NumStmtFields = 0;
+
+ /// \brief The number of record fields required for the Expr class
+ /// itself.
+ static const unsigned NumExprFields = NumStmtFields + 3;
+
+    // Each of the Visit* functions reads in part of the expression
+    // from the given record and the current expression stack, then
+    // returns the total number of operands that it read from the
+    // expression stack. (A short worked example follows this class.)
+
+ unsigned VisitStmt(Stmt *S);
+ unsigned VisitNullStmt(NullStmt *S);
+ unsigned VisitCompoundStmt(CompoundStmt *S);
+ unsigned VisitSwitchCase(SwitchCase *S);
+ unsigned VisitCaseStmt(CaseStmt *S);
+ unsigned VisitDefaultStmt(DefaultStmt *S);
+ unsigned VisitLabelStmt(LabelStmt *S);
+ unsigned VisitIfStmt(IfStmt *S);
+ unsigned VisitSwitchStmt(SwitchStmt *S);
+ unsigned VisitWhileStmt(WhileStmt *S);
+ unsigned VisitDoStmt(DoStmt *S);
+ unsigned VisitForStmt(ForStmt *S);
+ unsigned VisitGotoStmt(GotoStmt *S);
+ unsigned VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ unsigned VisitContinueStmt(ContinueStmt *S);
+ unsigned VisitBreakStmt(BreakStmt *S);
+ unsigned VisitReturnStmt(ReturnStmt *S);
+ unsigned VisitDeclStmt(DeclStmt *S);
+ unsigned VisitAsmStmt(AsmStmt *S);
+ unsigned VisitExpr(Expr *E);
+ unsigned VisitPredefinedExpr(PredefinedExpr *E);
+ unsigned VisitDeclRefExpr(DeclRefExpr *E);
+ unsigned VisitIntegerLiteral(IntegerLiteral *E);
+ unsigned VisitFloatingLiteral(FloatingLiteral *E);
+ unsigned VisitImaginaryLiteral(ImaginaryLiteral *E);
+ unsigned VisitStringLiteral(StringLiteral *E);
+ unsigned VisitCharacterLiteral(CharacterLiteral *E);
+ unsigned VisitParenExpr(ParenExpr *E);
+ unsigned VisitUnaryOperator(UnaryOperator *E);
+ unsigned VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
+ unsigned VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ unsigned VisitCallExpr(CallExpr *E);
+ unsigned VisitMemberExpr(MemberExpr *E);
+ unsigned VisitCastExpr(CastExpr *E);
+ unsigned VisitBinaryOperator(BinaryOperator *E);
+ unsigned VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ unsigned VisitConditionalOperator(ConditionalOperator *E);
+ unsigned VisitImplicitCastExpr(ImplicitCastExpr *E);
+ unsigned VisitExplicitCastExpr(ExplicitCastExpr *E);
+ unsigned VisitCStyleCastExpr(CStyleCastExpr *E);
+ unsigned VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ unsigned VisitExtVectorElementExpr(ExtVectorElementExpr *E);
+ unsigned VisitInitListExpr(InitListExpr *E);
+ unsigned VisitDesignatedInitExpr(DesignatedInitExpr *E);
+ unsigned VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ unsigned VisitVAArgExpr(VAArgExpr *E);
+ unsigned VisitAddrLabelExpr(AddrLabelExpr *E);
+ unsigned VisitStmtExpr(StmtExpr *E);
+ unsigned VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
+ unsigned VisitChooseExpr(ChooseExpr *E);
+ unsigned VisitGNUNullExpr(GNUNullExpr *E);
+ unsigned VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ unsigned VisitBlockExpr(BlockExpr *E);
+ unsigned VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
+ unsigned VisitObjCStringLiteral(ObjCStringLiteral *E);
+ unsigned VisitObjCEncodeExpr(ObjCEncodeExpr *E);
+ unsigned VisitObjCSelectorExpr(ObjCSelectorExpr *E);
+ unsigned VisitObjCProtocolExpr(ObjCProtocolExpr *E);
+ unsigned VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
+ unsigned VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ unsigned VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);
+ unsigned VisitObjCMessageExpr(ObjCMessageExpr *E);
+ unsigned VisitObjCSuperExpr(ObjCSuperExpr *E);
+
+ unsigned VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
+ unsigned VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
+ unsigned VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
+ unsigned VisitObjCAtTryStmt(ObjCAtTryStmt *);
+ unsigned VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
+ unsigned VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
+ };
+}
+
+unsigned PCHStmtReader::VisitStmt(Stmt *S) {
+ assert(Idx == NumStmtFields && "Incorrect statement field count");
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ S->setSemiLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ unsigned NumStmts = Record[Idx++];
+ S->setStmts(*Reader.getContext(),
+ StmtStack.data() + StmtStack.size() - NumStmts, NumStmts);
+ S->setLBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setRBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return NumStmts;
+}
+
+unsigned PCHStmtReader::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
+ Reader.RecordSwitchCaseID(S, Record[Idx++]);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ S->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 3]));
+ S->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setSubStmt(StmtStack.back());
+ S->setCaseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setEllipsisLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ S->setSubStmt(StmtStack.back());
+ S->setDefaultLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ S->setID(Reader.GetIdentifierInfo(Record, Idx));
+ S->setSubStmt(StmtStack.back());
+ S->setIdentLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ Reader.RecordLabelStmt(S, Record[Idx++]);
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
+ S->setThen(StmtStack[StmtStack.size() - 2]);
+ S->setElse(StmtStack[StmtStack.size() - 1]);
+ S->setIfLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setElseLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setBody(StmtStack.back());
+ S->setSwitchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
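+ // The remainder of the record is the list of this switch's case IDs,
+ // in the order they were written; chain them onto the switch.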
+ SwitchCase *PrevSC = 0;
+ for (unsigned N = Record.size(); Idx != N; ++Idx) {
+ SwitchCase *SC = Reader.getSwitchCaseWithID(Record[Idx]);
+ if (PrevSC)
+ PrevSC->setNextSwitchCase(SC);
+ else
+ S->setSwitchCaseList(SC);
+ PrevSC = SC;
+ }
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setBody(StmtStack.back());
+ S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setBody(StmtStack.back());
+ S->setDoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ S->setInit(StmtStack[StmtStack.size() - 4]);
+ S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 3]));
+ S->setInc(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setBody(StmtStack.back());
+ S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 4;
+}
+
+unsigned PCHStmtReader::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ Reader.SetLabelOf(S, Record[Idx++]);
+ S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setTarget(cast_or_null<Expr>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ S->setContinueLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ S->setBreakLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ S->setRetValue(cast_or_null<Expr>(StmtStack.back()));
+ S->setReturnLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ S->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+
+ if (Idx + 1 == Record.size()) {
+ // Single declaration
+ S->setDeclGroup(DeclGroupRef(Reader.GetDecl(Record[Idx++])));
+ } else {
+ llvm::SmallVector<Decl *, 16> Decls;
+ Decls.reserve(Record.size() - Idx);
+ for (unsigned N = Record.size(); Idx != N; ++Idx)
+ Decls.push_back(Reader.GetDecl(Record[Idx]));
+ S->setDeclGroup(DeclGroupRef(DeclGroup::Create(*Reader.getContext(),
+ Decls.data(),
+ Decls.size())));
+ }
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ unsigned NumOutputs = Record[Idx++];
+ unsigned NumInputs = Record[Idx++];
+ unsigned NumClobbers = Record[Idx++];
+ S->setAsmLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setVolatile(Record[Idx++]);
+ S->setSimple(Record[Idx++]);
+
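+ // The sub-statements were pushed in the order they were written: the
+ // asm string, then a (constraint, expression) pair for each output
+ // and input, then the clobber strings.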
+ unsigned StackIdx
+ = StmtStack.size() - (NumOutputs*2 + NumInputs*2 + NumClobbers + 1);
+ S->setAsmString(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
+
+ // Outputs and inputs
+ llvm::SmallVector<std::string, 16> Names;
+ llvm::SmallVector<StringLiteral*, 16> Constraints;
+ llvm::SmallVector<Stmt*, 16> Exprs;
+ for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) {
+ Names.push_back(Reader.ReadString(Record, Idx));
+ Constraints.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
+ Exprs.push_back(StmtStack[StackIdx++]);
+ }
+ S->setOutputsAndInputs(NumOutputs, NumInputs,
+ Names.data(), Constraints.data(), Exprs.data());
+
+ // Clobbers
+ llvm::SmallVector<StringLiteral*, 16> Clobbers;
+ for (unsigned I = 0; I != NumClobbers; ++I)
+ Clobbers.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++]));
+ S->setClobbers(Clobbers.data(), NumClobbers);
+
+ assert(StackIdx == StmtStack.size() && "Error deserializing AsmStmt");
+ return NumOutputs*2 + NumInputs*2 + NumClobbers + 1;
+}
+
+unsigned PCHStmtReader::VisitExpr(Expr *E) {
+ VisitStmt(E);
+ E->setType(Reader.GetType(Record[Idx++]));
+ E->setTypeDependent(Record[Idx++]);
+ E->setValueDependent(Record[Idx++]);
+ assert(Idx == NumExprFields && "Incorrect expression field count");
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+ E->setDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setValue(Reader.ReadAPInt(Record, Idx));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ E->setValue(Reader.ReadAPFloat(Record, Idx));
+ E->setExact(Record[Idx++]);
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ E->setSubExpr(cast<Expr>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ unsigned Len = Record[Idx++];
+ assert(Record[Idx] == E->getNumConcatenated() &&
+ "Wrong number of concatenated tokens!");
+ ++Idx;
+ E->setWide(Record[Idx++]);
+
+ // Read string data
+ llvm::SmallVector<char, 16> Str(&Record[Idx], &Record[Idx] + Len);
+ E->setStrData(*Reader.getContext(), Str.data(), Len);
+ Idx += Len;
+
+ // Read source locations
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ E->setStrTokenLoc(I, SourceLocation::getFromRawEncoding(Record[Idx++]));
+
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setWide(Record[Idx++]);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ E->setLParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParen(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setSubExpr(cast<Expr>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ E->setSubExpr(cast<Expr>(StmtStack.back()));
+ E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+ VisitExpr(E);
+ E->setSizeof(Record[Idx++]);
+ if (Record[Idx] == 0) {
+ E->setArgument(cast<Expr>(StmtStack.back()));
+ ++Idx;
+ } else {
+ E->setArgument(Reader.GetType(Record[Idx++]));
+ }
+ E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return E->isArgumentType()? 0 : 1;
+}
+
+unsigned PCHStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ E->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 2]));
+ E->setRHS(cast<Expr>(StmtStack[StmtStack.size() - 1]));
+ E->setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ E->setNumArgs(*Reader.getContext(), Record[Idx++]);
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
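+ // The callee was pushed before its arguments, so it sits just below
+ // the NumArgs argument expressions on the stack.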
+ E->setCallee(cast<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1]));
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I]));
+ return E->getNumArgs() + 1;
+}
+
+unsigned PCHStmtReader::VisitMemberExpr(MemberExpr *E) {
+ VisitExpr(E);
+ E->setBase(cast<Expr>(StmtStack.back()));
+ E->setMemberDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setArrow(Record[Idx++]);
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(cast<Expr>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ E->setLHS(cast<Expr>(StmtStack.end()[-2]));
+ E->setRHS(cast<Expr>(StmtStack.end()[-1]));
+ E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ E->setComputationLHSType(Reader.GetType(Record[Idx++]));
+ E->setComputationResultType(Reader.GetType(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
+ E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+ E->setLvalueCast(Record[Idx++]);
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ E->setTypeAsWritten(Reader.GetType(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setInitializer(cast<Expr>(StmtStack.back()));
+ E->setFileScope(Record[Idx++]);
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ E->setBase(cast<Expr>(StmtStack.back()));
+ E->setAccessor(Reader.GetIdentifierInfo(Record, Idx));
+ E->setAccessorLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ unsigned NumInits = Record[Idx++];
+ E->reserveInits(NumInits);
+ for (unsigned I = 0; I != NumInits; ++I)
+ E->updateInit(I,
+ cast<Expr>(StmtStack[StmtStack.size() - NumInits - 1 + I]));
+ E->setSyntacticForm(cast_or_null<InitListExpr>(StmtStack.back()));
+ E->setLBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setInitializedFieldInUnion(
+ cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++])));
+ E->sawArrayRangeDesignator(Record[Idx++]);
+ return NumInits + 1;
+}
+
+unsigned PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ VisitExpr(E);
+ unsigned NumSubExprs = Record[Idx++];
+ assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
+ for (unsigned I = 0; I != NumSubExprs; ++I)
+ E->setSubExpr(I, cast<Expr>(StmtStack[StmtStack.size() - NumSubExprs + I]));
+ E->setEqualOrColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setGNUSyntax(Record[Idx++]);
+
+ llvm::SmallVector<Designator, 4> Designators;
+ while (Idx < Record.size()) {
+ switch ((pch::DesignatorTypes)Record[Idx++]) {
+ case pch::DESIG_FIELD_DECL: {
+ FieldDecl *Field = cast<FieldDecl>(Reader.GetDecl(Record[Idx++]));
+ SourceLocation DotLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation FieldLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
+ FieldLoc));
+ Designators.back().setField(Field);
+ break;
+ }
+
+ case pch::DESIG_FIELD_NAME: {
+ const IdentifierInfo *Name = Reader.GetIdentifierInfo(Record, Idx);
+ SourceLocation DotLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation FieldLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ Designators.push_back(Designator(Name, DotLoc, FieldLoc));
+ break;
+ }
+
+ case pch::DESIG_ARRAY: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation RBracketLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
+ break;
+ }
+
+ case pch::DESIG_ARRAY_RANGE: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation EllipsisLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ SourceLocation RBracketLoc
+ = SourceLocation::getFromRawEncoding(Record[Idx++]);
+ Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
+ RBracketLoc));
+ break;
+ }
+ }
+ }
+ E->setDesignators(Designators.data(), Designators.size());
+
+ return NumSubExprs;
+}
+
+unsigned PCHStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(cast<Expr>(StmtStack.back()));
+ E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ E->setAmpAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ Reader.SetLabelOf(E, Record[Idx++]);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setSubStmt(cast_or_null<CompoundStmt>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
+ VisitExpr(E);
+ E->setArgType1(Reader.GetType(Record[Idx++]));
+ E->setArgType2(Reader.GetType(Record[Idx++]));
+ E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3]));
+ E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1]));
+ E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ E->setTokenLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ unsigned NumExprs = Record[Idx++];
+ E->setExprs((Expr **)&StmtStack[StmtStack.size() - NumExprs], NumExprs);
+ E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return NumExprs;
+}
+
+unsigned PCHStmtReader::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ E->setBlockDecl(cast_or_null<BlockDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setHasBlockDeclRefExprs(Record[Idx++]);
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ VisitExpr(E);
+ E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setByRef(Record[Idx++]);
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+unsigned PCHStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ E->setString(cast<StringLiteral>(StmtStack.back()));
+ E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ E->setEncodedType(Reader.GetType(Record[Idx++]));
+ E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ E->setSelector(Reader.GetSelector(Record, Idx));
+ E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ E->setProtocol(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ E->setDecl(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setBase(cast<Expr>(StmtStack.back()));
+ E->setIsArrow(Record[Idx++]);
+ E->setIsFreeIvar(Record[Idx++]);
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ E->setProperty(cast<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setBase(cast<Expr>(StmtStack.back()));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ VisitExpr(E);
+ E->setGetterMethod(
+ cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setSetterMethod(
+ cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setClassProp(
+ cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++])));
+ E->setBase(cast_or_null<Expr>(StmtStack.back()));
+ E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ E->setNumArgs(Record[Idx++]);
+ E->setLeftLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setRightLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ E->setSelector(Reader.GetSelector(Record, Idx));
+ E->setMethodDecl(cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++])));
+
+ E->setReceiver(
+ cast_or_null<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1]));
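+ // A null receiver means this is a class message; in that case the
+ // record carries the class interface and identifier instead.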
+ if (!E->getReceiver()) {
+ ObjCMessageExpr::ClassInfo CI;
+ CI.first = cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++]));
+ CI.second = Reader.GetIdentifierInfo(Record, Idx);
+ E->setClassInfo(CI);
+ }
+
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I]));
+ return E->getNumArgs() + 1;
+}
+
+unsigned PCHStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) {
+ VisitExpr(E);
+ E->setLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 0;
+}
+
+unsigned PCHStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ S->setElement(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 3]));
+ S->setCollection(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2]));
+ S->setBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ S->setCatchBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 2]));
+ S->setNextCatchStmt(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setCatchParamDecl(cast_or_null<ParmVarDecl>(Reader.GetDecl(Record[Idx++])));
+ S->setAtCatchLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+ S->setFinallyBody(StmtStack.back());
+ S->setAtFinallyLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+unsigned PCHStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ VisitStmt(S);
+ S->setTryBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 3]));
+ S->setCatchStmts(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 2]));
+ S->setFinallyStmt(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setAtTryLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 3;
+}
+
+unsigned PCHStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+ S->setSynchExpr(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 2]));
+ S->setSynchBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1]));
+ S->setAtSynchronizedLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 2;
+}
+
+unsigned PCHStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+ S->setThrowExpr(StmtStack.back());
+ S->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ return 1;
+}
+
+
+// Within the bitstream, expressions are stored in Reverse Polish
+// Notation, with each subexpression preceding the expression that
+// contains it. To reconstruct an expression, we keep reading
+// expressions and pushing them onto the stack; an expression that
+// has operands pops those operands off the stack. Reading terminates
+// when we see a STMT_STOP record, and the single expression remaining
+// on the stack is our result.
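+// For example, the expression "a + b" is written as the record for
+// 'a', then the record for 'b', then the BinaryOperator record; after
+// reading all three, only the BinaryOperator remains on the stack.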
+Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) {
+ RecordData Record;
+ unsigned Idx;
+ llvm::SmallVector<Stmt *, 16> StmtStack;
+ PCHStmtReader Reader(*this, Record, Idx, StmtStack);
+ Stmt::EmptyShell Empty;
+
+ while (true) {
+ unsigned Code = Cursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Cursor.ReadBlockEnd()) {
+ Error("error at end of block in PCH file");
+ return 0;
+ }
+ break;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ Cursor.ReadSubBlockID();
+ if (Cursor.SkipBlock()) {
+ Error("malformed block record in PCH file");
+ return 0;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Cursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ Stmt *S = 0;
+ Idx = 0;
+ Record.clear();
+ bool Finished = false;
+ switch ((pch::StmtCode)Cursor.ReadRecord(Code, Record)) {
+ case pch::STMT_STOP:
+ Finished = true;
+ break;
+
+ case pch::STMT_NULL_PTR:
+ S = 0;
+ break;
+
+ case pch::STMT_NULL:
+ S = new (Context) NullStmt(Empty);
+ break;
+
+ case pch::STMT_COMPOUND:
+ S = new (Context) CompoundStmt(Empty);
+ break;
+
+ case pch::STMT_CASE:
+ S = new (Context) CaseStmt(Empty);
+ break;
+
+ case pch::STMT_DEFAULT:
+ S = new (Context) DefaultStmt(Empty);
+ break;
+
+ case pch::STMT_LABEL:
+ S = new (Context) LabelStmt(Empty);
+ break;
+
+ case pch::STMT_IF:
+ S = new (Context) IfStmt(Empty);
+ break;
+
+ case pch::STMT_SWITCH:
+ S = new (Context) SwitchStmt(Empty);
+ break;
+
+ case pch::STMT_WHILE:
+ S = new (Context) WhileStmt(Empty);
+ break;
+
+ case pch::STMT_DO:
+ S = new (Context) DoStmt(Empty);
+ break;
+
+ case pch::STMT_FOR:
+ S = new (Context) ForStmt(Empty);
+ break;
+
+ case pch::STMT_GOTO:
+ S = new (Context) GotoStmt(Empty);
+ break;
+
+ case pch::STMT_INDIRECT_GOTO:
+ S = new (Context) IndirectGotoStmt(Empty);
+ break;
+
+ case pch::STMT_CONTINUE:
+ S = new (Context) ContinueStmt(Empty);
+ break;
+
+ case pch::STMT_BREAK:
+ S = new (Context) BreakStmt(Empty);
+ break;
+
+ case pch::STMT_RETURN:
+ S = new (Context) ReturnStmt(Empty);
+ break;
+
+ case pch::STMT_DECL:
+ S = new (Context) DeclStmt(Empty);
+ break;
+
+ case pch::STMT_ASM:
+ S = new (Context) AsmStmt(Empty);
+ break;
+
+ case pch::EXPR_PREDEFINED:
+ S = new (Context) PredefinedExpr(Empty);
+ break;
+
+ case pch::EXPR_DECL_REF:
+ S = new (Context) DeclRefExpr(Empty);
+ break;
+
+ case pch::EXPR_INTEGER_LITERAL:
+ S = new (Context) IntegerLiteral(Empty);
+ break;
+
+ case pch::EXPR_FLOATING_LITERAL:
+ S = new (Context) FloatingLiteral(Empty);
+ break;
+
+ case pch::EXPR_IMAGINARY_LITERAL:
+ S = new (Context) ImaginaryLiteral(Empty);
+ break;
+
+ case pch::EXPR_STRING_LITERAL:
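+ // Field NumExprFields + 1 of the record holds the number of
+ // concatenated string tokens (see VisitStringLiteral above), which
+ // CreateEmpty needs up front.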
+ S = StringLiteral::CreateEmpty(*Context,
+ Record[PCHStmtReader::NumExprFields + 1]);
+ break;
+
+ case pch::EXPR_CHARACTER_LITERAL:
+ S = new (Context) CharacterLiteral(Empty);
+ break;
+
+ case pch::EXPR_PAREN:
+ S = new (Context) ParenExpr(Empty);
+ break;
+
+ case pch::EXPR_UNARY_OPERATOR:
+ S = new (Context) UnaryOperator(Empty);
+ break;
+
+ case pch::EXPR_SIZEOF_ALIGN_OF:
+ S = new (Context) SizeOfAlignOfExpr(Empty);
+ break;
+
+ case pch::EXPR_ARRAY_SUBSCRIPT:
+ S = new (Context) ArraySubscriptExpr(Empty);
+ break;
+
+ case pch::EXPR_CALL:
+ S = new (Context) CallExpr(*Context, Empty);
+ break;
+
+ case pch::EXPR_MEMBER:
+ S = new (Context) MemberExpr(Empty);
+ break;
+
+ case pch::EXPR_BINARY_OPERATOR:
+ S = new (Context) BinaryOperator(Empty);
+ break;
+
+ case pch::EXPR_COMPOUND_ASSIGN_OPERATOR:
+ S = new (Context) CompoundAssignOperator(Empty);
+ break;
+
+ case pch::EXPR_CONDITIONAL_OPERATOR:
+ S = new (Context) ConditionalOperator(Empty);
+ break;
+
+ case pch::EXPR_IMPLICIT_CAST:
+ S = new (Context) ImplicitCastExpr(Empty);
+ break;
+
+ case pch::EXPR_CSTYLE_CAST:
+ S = new (Context) CStyleCastExpr(Empty);
+ break;
+
+ case pch::EXPR_COMPOUND_LITERAL:
+ S = new (Context) CompoundLiteralExpr(Empty);
+ break;
+
+ case pch::EXPR_EXT_VECTOR_ELEMENT:
+ S = new (Context) ExtVectorElementExpr(Empty);
+ break;
+
+ case pch::EXPR_INIT_LIST:
+ S = new (Context) InitListExpr(Empty);
+ break;
+
+ case pch::EXPR_DESIGNATED_INIT:
+ S = DesignatedInitExpr::CreateEmpty(*Context,
+ Record[PCHStmtReader::NumExprFields] - 1);
+
+ break;
+
+ case pch::EXPR_IMPLICIT_VALUE_INIT:
+ S = new (Context) ImplicitValueInitExpr(Empty);
+ break;
+
+ case pch::EXPR_VA_ARG:
+ S = new (Context) VAArgExpr(Empty);
+ break;
+
+ case pch::EXPR_ADDR_LABEL:
+ S = new (Context) AddrLabelExpr(Empty);
+ break;
+
+ case pch::EXPR_STMT:
+ S = new (Context) StmtExpr(Empty);
+ break;
+
+ case pch::EXPR_TYPES_COMPATIBLE:
+ S = new (Context) TypesCompatibleExpr(Empty);
+ break;
+
+ case pch::EXPR_CHOOSE:
+ S = new (Context) ChooseExpr(Empty);
+ break;
+
+ case pch::EXPR_GNU_NULL:
+ S = new (Context) GNUNullExpr(Empty);
+ break;
+
+ case pch::EXPR_SHUFFLE_VECTOR:
+ S = new (Context) ShuffleVectorExpr(Empty);
+ break;
+
+ case pch::EXPR_BLOCK:
+ S = new (Context) BlockExpr(Empty);
+ break;
+
+ case pch::EXPR_BLOCK_DECL_REF:
+ S = new (Context) BlockDeclRefExpr(Empty);
+ break;
+
+ case pch::EXPR_OBJC_STRING_LITERAL:
+ S = new (Context) ObjCStringLiteral(Empty);
+ break;
+ case pch::EXPR_OBJC_ENCODE:
+ S = new (Context) ObjCEncodeExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_SELECTOR_EXPR:
+ S = new (Context) ObjCSelectorExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_PROTOCOL_EXPR:
+ S = new (Context) ObjCProtocolExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_IVAR_REF_EXPR:
+ S = new (Context) ObjCIvarRefExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_PROPERTY_REF_EXPR:
+ S = new (Context) ObjCPropertyRefExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_KVC_REF_EXPR:
+ S = new (Context) ObjCKVCRefExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_MESSAGE_EXPR:
+ S = new (Context) ObjCMessageExpr(Empty);
+ break;
+ case pch::EXPR_OBJC_SUPER_EXPR:
+ S = new (Context) ObjCSuperExpr(Empty);
+ break;
+ case pch::STMT_OBJC_FOR_COLLECTION:
+ S = new (Context) ObjCForCollectionStmt(Empty);
+ break;
+ case pch::STMT_OBJC_CATCH:
+ S = new (Context) ObjCAtCatchStmt(Empty);
+ break;
+ case pch::STMT_OBJC_FINALLY:
+ S = new (Context) ObjCAtFinallyStmt(Empty);
+ break;
+ case pch::STMT_OBJC_AT_TRY:
+ S = new (Context) ObjCAtTryStmt(Empty);
+ break;
+ case pch::STMT_OBJC_AT_SYNCHRONIZED:
+ S = new (Context) ObjCAtSynchronizedStmt(Empty);
+ break;
+ case pch::STMT_OBJC_AT_THROW:
+ S = new (Context) ObjCAtThrowStmt(Empty);
+ break;
+ }
+
+ // We hit a STMT_STOP, so we're done with this expression.
+ if (Finished)
+ break;
+
+ ++NumStatementsRead;
+
+ if (S) {
+ unsigned NumSubStmts = Reader.Visit(S);
+ while (NumSubStmts > 0) {
+ StmtStack.pop_back();
+ --NumSubStmts;
+ }
+ }
+
+ assert(Idx == Record.size() && "Invalid deserialization of statement");
+ StmtStack.push_back(S);
+ }
+ assert(StmtStack.size() == 1 && "Extra expressions on stack!");
+ SwitchCaseStmts.clear();
+ return StmtStack.back();
+}
diff --git a/lib/Frontend/PCHWriter.cpp b/lib/Frontend/PCHWriter.cpp
new file mode 100644
index 0000000..9f9b3b4
--- /dev/null
+++ b/lib/Frontend/PCHWriter.cpp
@@ -0,0 +1,1966 @@
+//===--- PCHWriter.cpp - Precompiled Headers Writer -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PCHWriter class, which writes a precompiled header.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHWriter.h"
+#include "../Sema/Sema.h" // FIXME: move header into include/clang/Sema
+#include "../Sema/IdentifierResolver.h" // FIXME: move header
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/System/Path.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Type serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN PCHTypeWriter {
+ PCHWriter &Writer;
+ PCHWriter::RecordData &Record;
+
+ public:
+ /// \brief Type code that corresponds to the record generated.
+ pch::TypeCode Code;
+
+ PCHTypeWriter(PCHWriter &Writer, PCHWriter::RecordData &Record)
+ : Writer(Writer), Record(Record), Code(pch::TYPE_EXT_QUAL) { }
+
+ void VisitArrayType(const ArrayType *T);
+ void VisitFunctionType(const FunctionType *T);
+ void VisitTagType(const TagType *T);
+
+#define TYPE(Class, Base) void Visit##Class##Type(const Class##Type *T);
+#define ABSTRACT_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+void PCHTypeWriter::VisitExtQualType(const ExtQualType *T) {
+ Writer.AddTypeRef(QualType(T->getBaseType(), 0), Record);
+ Record.push_back(T->getObjCGCAttr()); // FIXME: use stable values
+ Record.push_back(T->getAddressSpace());
+ Code = pch::TYPE_EXT_QUAL;
+}
+
+void PCHTypeWriter::VisitBuiltinType(const BuiltinType *T) {
+ assert(false && "Built-in types are never serialized");
+}
+
+void PCHTypeWriter::VisitFixedWidthIntType(const FixedWidthIntType *T) {
+ Record.push_back(T->getWidth());
+ Record.push_back(T->isSigned());
+ Code = pch::TYPE_FIXED_WIDTH_INT;
+}
+
+void PCHTypeWriter::VisitComplexType(const ComplexType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Code = pch::TYPE_COMPLEX;
+}
+
+void PCHTypeWriter::VisitPointerType(const PointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = pch::TYPE_POINTER;
+}
+
+void PCHTypeWriter::VisitBlockPointerType(const BlockPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = pch::TYPE_BLOCK_POINTER;
+}
+
+void PCHTypeWriter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = pch::TYPE_LVALUE_REFERENCE;
+}
+
+void PCHTypeWriter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = pch::TYPE_RVALUE_REFERENCE;
+}
+
+void PCHTypeWriter::VisitMemberPointerType(const MemberPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Writer.AddTypeRef(QualType(T->getClass(), 0), Record);
+ Code = pch::TYPE_MEMBER_POINTER;
+}
+
+void PCHTypeWriter::VisitArrayType(const ArrayType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getSizeModifier()); // FIXME: stable values
+ Record.push_back(T->getIndexTypeQualifier()); // FIXME: stable values
+}
+
+void PCHTypeWriter::VisitConstantArrayType(const ConstantArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddAPInt(T->getSize(), Record);
+ Code = pch::TYPE_CONSTANT_ARRAY;
+}
+
+void PCHTypeWriter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ VisitArrayType(T);
+ Code = pch::TYPE_INCOMPLETE_ARRAY;
+}
+
+void PCHTypeWriter::VisitVariableArrayType(const VariableArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddStmt(T->getSizeExpr());
+ Code = pch::TYPE_VARIABLE_ARRAY;
+}
+
+void PCHTypeWriter::VisitVectorType(const VectorType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getNumElements());
+ Code = pch::TYPE_VECTOR;
+}
+
+void PCHTypeWriter::VisitExtVectorType(const ExtVectorType *T) {
+ VisitVectorType(T);
+ Code = pch::TYPE_EXT_VECTOR;
+}
+
+void PCHTypeWriter::VisitFunctionType(const FunctionType *T) {
+ Writer.AddTypeRef(T->getResultType(), Record);
+}
+
+void PCHTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ VisitFunctionType(T);
+ Code = pch::TYPE_FUNCTION_NO_PROTO;
+}
+
+void PCHTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ VisitFunctionType(T);
+ Record.push_back(T->getNumArgs());
+ for (unsigned I = 0, N = T->getNumArgs(); I != N; ++I)
+ Writer.AddTypeRef(T->getArgType(I), Record);
+ Record.push_back(T->isVariadic());
+ Record.push_back(T->getTypeQuals());
+ Record.push_back(T->hasExceptionSpec());
+ Record.push_back(T->hasAnyExceptionSpec());
+ Record.push_back(T->getNumExceptions());
+ for (unsigned I = 0, N = T->getNumExceptions(); I != N; ++I)
+ Writer.AddTypeRef(T->getExceptionType(I), Record);
+ Code = pch::TYPE_FUNCTION_PROTO;
+}
+
+void PCHTypeWriter::VisitTypedefType(const TypedefType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = pch::TYPE_TYPEDEF;
+}
+
+void PCHTypeWriter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Writer.AddStmt(T->getUnderlyingExpr());
+ Code = pch::TYPE_TYPEOF_EXPR;
+}
+
+void PCHTypeWriter::VisitTypeOfType(const TypeOfType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Code = pch::TYPE_TYPEOF;
+}
+
+void PCHTypeWriter::VisitTagType(const TagType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+ assert(!T->isBeingDefined() &&
+ "Cannot serialize in the middle of a type definition");
+}
+
+void PCHTypeWriter::VisitRecordType(const RecordType *T) {
+ VisitTagType(T);
+ Code = pch::TYPE_RECORD;
+}
+
+void PCHTypeWriter::VisitEnumType(const EnumType *T) {
+ VisitTagType(T);
+ Code = pch::TYPE_ENUM;
+}
+
+void
+PCHTypeWriter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ // FIXME: Serialize this type (C++ only)
+ assert(false && "Cannot serialize template specialization types");
+}
+
+void PCHTypeWriter::VisitQualifiedNameType(const QualifiedNameType *T) {
+ // FIXME: Serialize this type (C++ only)
+ assert(false && "Cannot serialize qualified name types");
+}
+
+void PCHTypeWriter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = pch::TYPE_OBJC_INTERFACE;
+}
+
+void
+PCHTypeWriter::VisitObjCQualifiedInterfaceType(
+ const ObjCQualifiedInterfaceType *T) {
+ VisitObjCInterfaceType(T);
+ Record.push_back(T->getNumProtocols());
+ for (ObjCInterfaceType::qual_iterator I = T->qual_begin(),
+ E = T->qual_end(); I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = pch::TYPE_OBJC_QUALIFIED_INTERFACE;
+}
+
+void PCHTypeWriter::VisitObjCQualifiedIdType(const ObjCQualifiedIdType *T) {
+ Record.push_back(T->getNumProtocols());
+ for (ObjCQualifiedIdType::qual_iterator I = T->qual_begin(),
+ E = T->qual_end(); I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = pch::TYPE_OBJC_QUALIFIED_ID;
+}
+
+//===----------------------------------------------------------------------===//
+// PCHWriter Implementation
+//===----------------------------------------------------------------------===//
+
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ PCHWriter::RecordData &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (Name == 0 || Name[0] == 0) return;
+ Record.clear();
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ PCHWriter::RecordData &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
+ PCHWriter::RecordData &Record) {
+#define RECORD(X) EmitRecordID(pch::X, #X, Stream, Record)
+ RECORD(STMT_STOP);
+ RECORD(STMT_NULL_PTR);
+ RECORD(STMT_NULL);
+ RECORD(STMT_COMPOUND);
+ RECORD(STMT_CASE);
+ RECORD(STMT_DEFAULT);
+ RECORD(STMT_LABEL);
+ RECORD(STMT_IF);
+ RECORD(STMT_SWITCH);
+ RECORD(STMT_WHILE);
+ RECORD(STMT_DO);
+ RECORD(STMT_FOR);
+ RECORD(STMT_GOTO);
+ RECORD(STMT_INDIRECT_GOTO);
+ RECORD(STMT_CONTINUE);
+ RECORD(STMT_BREAK);
+ RECORD(STMT_RETURN);
+ RECORD(STMT_DECL);
+ RECORD(STMT_ASM);
+ RECORD(EXPR_PREDEFINED);
+ RECORD(EXPR_DECL_REF);
+ RECORD(EXPR_INTEGER_LITERAL);
+ RECORD(EXPR_FLOATING_LITERAL);
+ RECORD(EXPR_IMAGINARY_LITERAL);
+ RECORD(EXPR_STRING_LITERAL);
+ RECORD(EXPR_CHARACTER_LITERAL);
+ RECORD(EXPR_PAREN);
+ RECORD(EXPR_UNARY_OPERATOR);
+ RECORD(EXPR_SIZEOF_ALIGN_OF);
+ RECORD(EXPR_ARRAY_SUBSCRIPT);
+ RECORD(EXPR_CALL);
+ RECORD(EXPR_MEMBER);
+ RECORD(EXPR_BINARY_OPERATOR);
+ RECORD(EXPR_COMPOUND_ASSIGN_OPERATOR);
+ RECORD(EXPR_CONDITIONAL_OPERATOR);
+ RECORD(EXPR_IMPLICIT_CAST);
+ RECORD(EXPR_CSTYLE_CAST);
+ RECORD(EXPR_COMPOUND_LITERAL);
+ RECORD(EXPR_EXT_VECTOR_ELEMENT);
+ RECORD(EXPR_INIT_LIST);
+ RECORD(EXPR_DESIGNATED_INIT);
+ RECORD(EXPR_IMPLICIT_VALUE_INIT);
+ RECORD(EXPR_VA_ARG);
+ RECORD(EXPR_ADDR_LABEL);
+ RECORD(EXPR_STMT);
+ RECORD(EXPR_TYPES_COMPATIBLE);
+ RECORD(EXPR_CHOOSE);
+ RECORD(EXPR_GNU_NULL);
+ RECORD(EXPR_SHUFFLE_VECTOR);
+ RECORD(EXPR_BLOCK);
+ RECORD(EXPR_BLOCK_DECL_REF);
+ RECORD(EXPR_OBJC_STRING_LITERAL);
+ RECORD(EXPR_OBJC_ENCODE);
+ RECORD(EXPR_OBJC_SELECTOR_EXPR);
+ RECORD(EXPR_OBJC_PROTOCOL_EXPR);
+ RECORD(EXPR_OBJC_IVAR_REF_EXPR);
+ RECORD(EXPR_OBJC_PROPERTY_REF_EXPR);
+ RECORD(EXPR_OBJC_KVC_REF_EXPR);
+ RECORD(EXPR_OBJC_MESSAGE_EXPR);
+ RECORD(EXPR_OBJC_SUPER_EXPR);
+ RECORD(STMT_OBJC_FOR_COLLECTION);
+ RECORD(STMT_OBJC_CATCH);
+ RECORD(STMT_OBJC_FINALLY);
+ RECORD(STMT_OBJC_AT_TRY);
+ RECORD(STMT_OBJC_AT_SYNCHRONIZED);
+ RECORD(STMT_OBJC_AT_THROW);
+#undef RECORD
+}
+
+void PCHWriter::WriteBlockInfoBlock() {
+ RecordData Record;
+ Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+
+#define BLOCK(X) EmitBlockID(pch::X ## _ID, #X, Stream, Record)
+#define RECORD(X) EmitRecordID(pch::X, #X, Stream, Record)
+
+ // PCH Top-Level Block.
+ BLOCK(PCH_BLOCK);
+ RECORD(TYPE_OFFSET);
+ RECORD(DECL_OFFSET);
+ RECORD(LANGUAGE_OPTIONS);
+ RECORD(METADATA);
+ RECORD(IDENTIFIER_OFFSET);
+ RECORD(IDENTIFIER_TABLE);
+ RECORD(EXTERNAL_DEFINITIONS);
+ RECORD(SPECIAL_TYPES);
+ RECORD(STATISTICS);
+ RECORD(TENTATIVE_DEFINITIONS);
+ RECORD(LOCALLY_SCOPED_EXTERNAL_DECLS);
+ RECORD(SELECTOR_OFFSETS);
+ RECORD(METHOD_POOL);
+ RECORD(PP_COUNTER_VALUE);
+ RECORD(SOURCE_LOCATION_OFFSETS);
+ RECORD(SOURCE_LOCATION_PRELOADS);
+ RECORD(STAT_CACHE);
+ RECORD(EXT_VECTOR_DECLS);
+ RECORD(OBJC_CATEGORY_IMPLEMENTATIONS);
+
+ // SourceManager Block.
+ BLOCK(SOURCE_MANAGER_BLOCK);
+ RECORD(SM_SLOC_FILE_ENTRY);
+ RECORD(SM_SLOC_BUFFER_ENTRY);
+ RECORD(SM_SLOC_BUFFER_BLOB);
+ RECORD(SM_SLOC_INSTANTIATION_ENTRY);
+ RECORD(SM_LINE_TABLE);
+ RECORD(SM_HEADER_FILE_INFO);
+
+ // Preprocessor Block.
+ BLOCK(PREPROCESSOR_BLOCK);
+ RECORD(PP_MACRO_OBJECT_LIKE);
+ RECORD(PP_MACRO_FUNCTION_LIKE);
+ RECORD(PP_TOKEN);
+
+ // Types block.
+ BLOCK(TYPES_BLOCK);
+ RECORD(TYPE_EXT_QUAL);
+ RECORD(TYPE_FIXED_WIDTH_INT);
+ RECORD(TYPE_COMPLEX);
+ RECORD(TYPE_POINTER);
+ RECORD(TYPE_BLOCK_POINTER);
+ RECORD(TYPE_LVALUE_REFERENCE);
+ RECORD(TYPE_RVALUE_REFERENCE);
+ RECORD(TYPE_MEMBER_POINTER);
+ RECORD(TYPE_CONSTANT_ARRAY);
+ RECORD(TYPE_INCOMPLETE_ARRAY);
+ RECORD(TYPE_VARIABLE_ARRAY);
+ RECORD(TYPE_VECTOR);
+ RECORD(TYPE_EXT_VECTOR);
+ RECORD(TYPE_FUNCTION_PROTO);
+ RECORD(TYPE_FUNCTION_NO_PROTO);
+ RECORD(TYPE_TYPEDEF);
+ RECORD(TYPE_TYPEOF_EXPR);
+ RECORD(TYPE_TYPEOF);
+ RECORD(TYPE_RECORD);
+ RECORD(TYPE_ENUM);
+ RECORD(TYPE_OBJC_INTERFACE);
+ RECORD(TYPE_OBJC_QUALIFIED_INTERFACE);
+ RECORD(TYPE_OBJC_QUALIFIED_ID);
+ // Statements and Exprs can occur in the Types block.
+ AddStmtsExprs(Stream, Record);
+
+ // Decls block.
+ BLOCK(DECLS_BLOCK);
+ RECORD(DECL_ATTR);
+ RECORD(DECL_TRANSLATION_UNIT);
+ RECORD(DECL_TYPEDEF);
+ RECORD(DECL_ENUM);
+ RECORD(DECL_RECORD);
+ RECORD(DECL_ENUM_CONSTANT);
+ RECORD(DECL_FUNCTION);
+ RECORD(DECL_OBJC_METHOD);
+ RECORD(DECL_OBJC_INTERFACE);
+ RECORD(DECL_OBJC_PROTOCOL);
+ RECORD(DECL_OBJC_IVAR);
+ RECORD(DECL_OBJC_AT_DEFS_FIELD);
+ RECORD(DECL_OBJC_CLASS);
+ RECORD(DECL_OBJC_FORWARD_PROTOCOL);
+ RECORD(DECL_OBJC_CATEGORY);
+ RECORD(DECL_OBJC_CATEGORY_IMPL);
+ RECORD(DECL_OBJC_IMPLEMENTATION);
+ RECORD(DECL_OBJC_COMPATIBLE_ALIAS);
+ RECORD(DECL_OBJC_PROPERTY);
+ RECORD(DECL_OBJC_PROPERTY_IMPL);
+ RECORD(DECL_FIELD);
+ RECORD(DECL_VAR);
+ RECORD(DECL_IMPLICIT_PARAM);
+ RECORD(DECL_PARM_VAR);
+ RECORD(DECL_ORIGINAL_PARM_VAR);
+ RECORD(DECL_FILE_SCOPE_ASM);
+ RECORD(DECL_BLOCK);
+ RECORD(DECL_CONTEXT_LEXICAL);
+ RECORD(DECL_CONTEXT_VISIBLE);
+ // Statements and Exprs can occur in the Decls block.
+ AddStmtsExprs(Stream, Record);
+#undef RECORD
+#undef BLOCK
+ Stream.ExitBlock();
+}
+
+
+/// \brief Write the PCH metadata (e.g., the target triple i686-apple-darwin9).
+void PCHWriter::WriteMetadata(ASTContext &Context) {
+ using namespace llvm;
+
+ // Original file name
+ SourceManager &SM = Context.getSourceManager();
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ BitCodeAbbrev *FileAbbrev = new BitCodeAbbrev();
+ FileAbbrev->Add(BitCodeAbbrevOp(pch::ORIGINAL_FILE_NAME));
+ FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
+
+ llvm::sys::Path MainFilePath(MainFile->getName());
+ std::string MainFileName;
+
+ if (!MainFilePath.isAbsolute()) {
+ llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+ P.appendComponent(MainFilePath.toString());
+ MainFileName = P.toString();
+ } else {
+ MainFileName = MainFilePath.toString();
+ }
+
+ RecordData Record;
+ Record.push_back(pch::ORIGINAL_FILE_NAME);
+ Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileName.c_str(),
+ MainFileName.size());
+ }
+
+ // Metadata
+ const TargetInfo &Target = Context.Target;
+ BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev();
+ MetaAbbrev->Add(BitCodeAbbrevOp(pch::METADATA));
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH major
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH minor
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple
+ unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev);
+
+ RecordData Record;
+ Record.push_back(pch::METADATA);
+ Record.push_back(pch::VERSION_MAJOR);
+ Record.push_back(pch::VERSION_MINOR);
+ Record.push_back(CLANG_VERSION_MAJOR);
+ Record.push_back(CLANG_VERSION_MINOR);
+ const char *Triple = Target.getTargetTriple();
+ Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, Triple, strlen(Triple));
+}
+
+/// \brief Write the LangOptions structure.
+void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
+ RecordData Record;
+ Record.push_back(LangOpts.Trigraphs);
+ Record.push_back(LangOpts.BCPLComment); // BCPL-style '//' comments.
+ Record.push_back(LangOpts.DollarIdents); // '$' allowed in identifiers.
+ Record.push_back(LangOpts.AsmPreprocessor); // Preprocessor in asm mode.
+ Record.push_back(LangOpts.GNUMode); // True in gnu99 mode, false in c99 mode (etc.)
+ Record.push_back(LangOpts.ImplicitInt); // C89 implicit 'int'.
+ Record.push_back(LangOpts.Digraphs); // C94, C99 and C++
+ Record.push_back(LangOpts.HexFloats); // C99 Hexadecimal float constants.
+ Record.push_back(LangOpts.C99); // C99 Support
+ Record.push_back(LangOpts.Microsoft); // Microsoft extensions.
+ Record.push_back(LangOpts.CPlusPlus); // C++ Support
+ Record.push_back(LangOpts.CPlusPlus0x); // C++0x Support
+ Record.push_back(LangOpts.CXXOperatorNames); // Treat C++ operator names as keywords.
+
+ Record.push_back(LangOpts.ObjC1); // Objective-C 1 support enabled.
+ Record.push_back(LangOpts.ObjC2); // Objective-C 2 support enabled.
+ Record.push_back(LangOpts.ObjCNonFragileABI); // Objective-C modern ABI enabled
+
+ Record.push_back(LangOpts.PascalStrings); // Allow Pascal strings
+ Record.push_back(LangOpts.WritableStrings); // Allow writable strings
+ Record.push_back(LangOpts.LaxVectorConversions);
+ Record.push_back(LangOpts.Exceptions); // Support exception handling.
+
+ Record.push_back(LangOpts.NeXTRuntime); // Use NeXT runtime.
+ Record.push_back(LangOpts.Freestanding); // Freestanding implementation
+ Record.push_back(LangOpts.NoBuiltin); // Do not use builtin functions (-fno-builtin)
+
+ // Whether static initializers are protected by locks.
+ Record.push_back(LangOpts.ThreadsafeStatics);
+ Record.push_back(LangOpts.Blocks); // block extension to C
+ Record.push_back(LangOpts.EmitAllDecls); // Emit all declarations, even if
+ // they are unused.
+ Record.push_back(LangOpts.MathErrno); // Math functions must respect errno
+ // (modulo the platform support).
+
+ Record.push_back(LangOpts.OverflowChecking); // Extension to call a handler function when
+ // signed integer arithmetic overflows.
+
+ Record.push_back(LangOpts.HeinousExtensions); // Extensions that we really don't like and
+ // may be ripped out at any time.
+
+ Record.push_back(LangOpts.Optimize); // Whether __OPTIMIZE__ should be defined.
+ Record.push_back(LangOpts.OptimizeSize); // Whether __OPTIMIZE_SIZE__ should be
+ // defined.
+ Record.push_back(LangOpts.Static); // Should __STATIC__ be defined (as
+ // opposed to __DYNAMIC__).
+ Record.push_back(LangOpts.PICLevel); // The value for __PIC__, if non-zero.
+
+ Record.push_back(LangOpts.GNUInline); // Should GNU inline semantics be
+ // used (instead of C99 semantics).
+ Record.push_back(LangOpts.NoInline); // Should __NO_INLINE__ be defined.
+ Record.push_back(LangOpts.AccessControl); // Whether C++ access control should
+ // be enabled.
+ Record.push_back(LangOpts.getGCMode());
+ Record.push_back(LangOpts.getVisibilityMode());
+ Record.push_back(LangOpts.InstantiationDepth);
+ Stream.EmitRecord(pch::LANGUAGE_OPTIONS, Record);
+}
+
+//===----------------------------------------------------------------------===//
+// stat cache Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table of stat cache results.
+class VISIBILITY_HIDDEN PCHStatCacheTrait {
+public:
+ typedef const char * key_type;
+ typedef key_type key_type_ref;
+
+ typedef std::pair<int, struct stat> data_type;
+ typedef const data_type& data_type_ref;
+
+ static unsigned ComputeHash(const char *path) {
+ return BernsteinHash(path);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, const char *path,
+ data_type_ref Data) {
+ unsigned StrLen = strlen(path);
+ clang::io::Emit16(Out, StrLen);
+ unsigned DataLen = 1; // result value
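+ // A successful stat() (Data.first == 0) additionally stores st_ino
+ // (4 bytes), st_dev (4), st_mode (2), st_mtime (8), and st_size (8),
+ // matching what EmitData writes below.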
+ if (Data.first == 0)
+ DataLen += 4 + 4 + 2 + 8 + 8;
+ clang::io::Emit8(Out, DataLen);
+ return std::make_pair(StrLen + 1, DataLen);
+ }
+
+ void EmitKey(llvm::raw_ostream& Out, const char *path, unsigned KeyLen) {
+ Out.write(path, KeyLen);
+ }
+
+ void EmitData(llvm::raw_ostream& Out, key_type_ref,
+ data_type_ref Data, unsigned DataLen) {
+ using namespace clang::io;
+ uint64_t Start = Out.tell(); (void)Start;
+
+ // Result of stat()
+ Emit8(Out, Data.first? 1 : 0);
+
+ if (Data.first == 0) {
+ Emit32(Out, (uint32_t) Data.second.st_ino);
+ Emit32(Out, (uint32_t) Data.second.st_dev);
+ Emit16(Out, (uint16_t) Data.second.st_mode);
+ Emit64(Out, (uint64_t) Data.second.st_mtime);
+ Emit64(Out, (uint64_t) Data.second.st_size);
+ }
+
+ assert(Out.tell() - Start == DataLen && "Wrong data length");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the stat() system call cache to the PCH file.
+void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
+ // Build the on-disk hash table containing information about every
+ // stat() call.
+ OnDiskChainedHashTableGenerator<PCHStatCacheTrait> Generator;
+ unsigned NumStatEntries = 0;
+ for (MemorizeStatCalls::iterator Stat = StatCalls.begin(),
+ StatEnd = StatCalls.end();
+ Stat != StatEnd; ++Stat, ++NumStatEntries)
+ Generator.insert(Stat->first(), Stat->second);
+
+ // Create the on-disk hash table in a buffer.
+ llvm::SmallVector<char, 4096> StatCacheData;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(StatCacheData);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out);
+ }
+
+ // Create a blob abbreviation
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::STAT_CACHE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned StatCacheAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the stat cache
+ RecordData Record;
+ Record.push_back(pch::STAT_CACHE);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumStatEntries);
+ Stream.EmitRecordWithBlob(StatCacheAbbrev, Record,
+ &StatCacheData.front(),
+ StatCacheData.size());
+}
+
+//===----------------------------------------------------------------------===//
+// Source Manager Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// file.
+static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SM_SLOC_FILE_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer.
+static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SM_SLOC_BUFFER_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Buffer name blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer's blob.
+static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SM_SLOC_BUFFER_BLOB));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// macro instantiation.
+static unsigned CreateSLocInstantiationAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SM_SLOC_INSTANTIATION_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Writes the block containing the serialized form of the
+/// source manager.
+///
+/// TODO: We should probably use an on-disk hash table (stored in a
+/// blob), indexed based on the file name, so that we only create
+/// entries for files that we actually need. In the common case (no
+/// errors), we probably won't have to create file entries for any of
+/// the files in the AST.
+void PCHWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
+ const Preprocessor &PP) {
+ RecordData Record;
+
+ // Enter the source manager block.
+ Stream.EnterSubblock(pch::SOURCE_MANAGER_BLOCK_ID, 3);
+
+ // Abbreviations for the various kinds of source-location entries.
+ unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
+ unsigned SLocBufferAbbrv = CreateSLocBufferAbbrev(Stream);
+ unsigned SLocBufferBlobAbbrv = CreateSLocBufferBlobAbbrev(Stream);
+ unsigned SLocInstantiationAbbrv = CreateSLocInstantiationAbbrev(Stream);
+
+ // Write the line table.
+ if (SourceMgr.hasLineTable()) {
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ // Emit the file names
+ Record.push_back(LineTable.getNumFilenames());
+ for (unsigned I = 0, N = LineTable.getNumFilenames(); I != N; ++I) {
+ // Emit the file name
+ const char *Filename = LineTable.getFilename(I);
+ unsigned FilenameLen = Filename? strlen(Filename) : 0;
+ Record.push_back(FilenameLen);
+ if (FilenameLen)
+ Record.insert(Record.end(), Filename, Filename + FilenameLen);
+ }
+
+ // Emit the line entries
+ for (LineTableInfo::iterator L = LineTable.begin(), LEnd = LineTable.end();
+ L != LEnd; ++L) {
+ // Emit the file ID
+ Record.push_back(L->first);
+
+ // Emit the line entries
+ Record.push_back(L->second.size());
+ for (std::vector<LineEntry>::iterator LE = L->second.begin(),
+ LEEnd = L->second.end();
+ LE != LEEnd; ++LE) {
+ Record.push_back(LE->FileOffset);
+ Record.push_back(LE->LineNo);
+ Record.push_back(LE->FilenameID);
+ Record.push_back((unsigned)LE->FileKind);
+ Record.push_back(LE->IncludeOffset);
+ }
+ }
+ Stream.EmitRecord(pch::SM_LINE_TABLE, Record);
+ }
+
+ // Write out entries for all of the header files we know about.
+ HeaderSearch &HS = PP.getHeaderSearchInfo();
+ Record.clear();
+ for (HeaderSearch::header_file_iterator I = HS.header_file_begin(),
+ E = HS.header_file_end();
+ I != E; ++I) {
+ Record.push_back(I->isImport);
+ Record.push_back(I->DirInfo);
+ Record.push_back(I->NumIncludes);
+ AddIdentifierRef(I->ControllingMacro, Record);
+ Stream.EmitRecord(pch::SM_HEADER_FILE_INFO, Record);
+ Record.clear();
+ }
+
+ // Write out the source location entry table. We skip the first
+ // entry, which is always the same dummy entry.
+ std::vector<uint32_t> SLocEntryOffsets;
+ RecordData PreloadSLocs;
+ SLocEntryOffsets.reserve(SourceMgr.sloc_entry_size() - 1);
+ for (SourceManager::sloc_entry_iterator
+ SLoc = SourceMgr.sloc_entry_begin() + 1,
+ SLocEnd = SourceMgr.sloc_entry_end();
+ SLoc != SLocEnd; ++SLoc) {
+ // Record the offset of this source-location entry.
+ SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
+
+ // Figure out which record code to use.
+ unsigned Code;
+ if (SLoc->isFile()) {
+ if (SLoc->getFile().getContentCache()->Entry)
+ Code = pch::SM_SLOC_FILE_ENTRY;
+ else
+ Code = pch::SM_SLOC_BUFFER_ENTRY;
+ } else
+ Code = pch::SM_SLOC_INSTANTIATION_ENTRY;
+ Record.clear();
+ Record.push_back(Code);
+
+ Record.push_back(SLoc->getOffset());
+ if (SLoc->isFile()) {
+ const SrcMgr::FileInfo &File = SLoc->getFile();
+ Record.push_back(File.getIncludeLoc().getRawEncoding());
+ Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
+ Record.push_back(File.hasLineDirectives());
+
+ const SrcMgr::ContentCache *Content = File.getContentCache();
+ if (Content->Entry) {
+ // The source location entry is a file. The blob associated
+ // with this entry is the file name.
+ Stream.EmitRecordWithBlob(SLocFileAbbrv, Record,
+ Content->Entry->getName(),
+ strlen(Content->Entry->getName()));
+
+ // FIXME: For now, preload all file source locations, so that
+ // we get the appropriate File entries in the reader. This is
+ // a temporary measure.
+ PreloadSLocs.push_back(SLocEntryOffsets.size());
+ } else {
+ // The source location entry is a buffer. The blob associated
+ // with this entry contains the contents of the buffer.
+
+ // We add one to the size so that we capture the trailing NULL
+ // that is required by llvm::MemoryBuffer::getMemBuffer (on
+ // the reader side).
+ const llvm::MemoryBuffer *Buffer = Content->getBuffer();
+ const char *Name = Buffer->getBufferIdentifier();
+ Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record, Name, strlen(Name) + 1);
+ Record.clear();
+ Record.push_back(pch::SM_SLOC_BUFFER_BLOB);
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1);
+
+ if (strcmp(Name, "<built-in>") == 0)
+ PreloadSLocs.push_back(SLocEntryOffsets.size());
+ }
+ } else {
+ // The source location entry is an instantiation.
+ const SrcMgr::InstantiationInfo &Inst = SLoc->getInstantiation();
+ Record.push_back(Inst.getSpellingLoc().getRawEncoding());
+ Record.push_back(Inst.getInstantiationLocStart().getRawEncoding());
+ Record.push_back(Inst.getInstantiationLocEnd().getRawEncoding());
+
+ // Compute the token length for this macro expansion.
+ unsigned NextOffset = SourceMgr.getNextOffset();
+ SourceManager::sloc_entry_iterator NextSLoc = SLoc;
+ if (++NextSLoc != SLocEnd)
+ NextOffset = NextSLoc->getOffset();
+ Record.push_back(NextOffset - SLoc->getOffset() - 1);
+ Stream.EmitRecordWithAbbrev(SLocInstantiationAbbrv, Record);
+ }
+ }
+
+ Stream.ExitBlock();
+
+ if (SLocEntryOffsets.empty())
+ return;
+
+ // Write the source-location offsets table into the PCH block. This
+ // table is used for lazily loading source-location information.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SOURCE_LOCATION_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // next offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
+ unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Record.clear();
+ Record.push_back(pch::SOURCE_LOCATION_OFFSETS);
+ Record.push_back(SLocEntryOffsets.size());
+ Record.push_back(SourceMgr.getNextOffset());
+ Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
+ (const char *)&SLocEntryOffsets.front(),
+ SLocEntryOffsets.size()*sizeof(SLocEntryOffsets[0]));
+
+ // Write the source location entry preloads array, telling the PCH
+ // reader which source locations entries it should load eagerly.
+ Stream.EmitRecord(pch::SOURCE_LOCATION_PRELOADS, PreloadSLocs);
+}
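+
+// For reference, the three kinds of source-location entries written above
+// are laid out roughly as follows (abbreviated records; blobs noted
+// explicitly):
+//
+//   SM_SLOC_FILE_ENTRY:          offset, include loc, characteristic,
+//                                has-line-directives, blob = file name
+//   SM_SLOC_BUFFER_ENTRY:        same fields, blob = buffer name, followed
+//                                by SM_SLOC_BUFFER_BLOB carrying the buffer
+//                                contents (plus a trailing NUL)
+//   SM_SLOC_INSTANTIATION_ENTRY: offset, spelling loc, instantiation start,
+//                                instantiation end, token length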
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Writes the block containing the serialized form of the
+/// preprocessor.
+///
+void PCHWriter::WritePreprocessor(const Preprocessor &PP) {
+ RecordData Record;
+
+ // If the preprocessor __COUNTER__ value has been bumped, remember it.
+ if (PP.getCounterValue() != 0) {
+ Record.push_back(PP.getCounterValue());
+ Stream.EmitRecord(pch::PP_COUNTER_VALUE, Record);
+ Record.clear();
+ }
+
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(pch::PREPROCESSOR_BLOCK_ID, 2);
+
+ // If the PCH file contains __DATE__ or __TIME__ emit a warning about this.
+ // FIXME: use diagnostics subsystem for localization etc.
+ if (PP.SawDateOrTime())
+ fprintf(stderr, "warning: precompiled header used __DATE__ or __TIME__.\n");
+
+ // Loop over all the macro definitions that are live at the end of the file,
+ // emitting each to the PP section.
+ for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end();
+ I != E; ++I) {
+ // FIXME: This emits macros in hash table order, we should do it in a stable
+ // order so that output is reproducible.
+ MacroInfo *MI = I->second;
+
+ // Don't emit builtin macros like __LINE__ to the PCH file unless they have
+ // been redefined by the header (in which case isBuiltinMacro() is false).
+ if (MI->isBuiltinMacro())
+ continue;
+
+ // FIXME: Remove this identifier reference?
+ AddIdentifierRef(I->first, Record);
+ MacroOffsets[I->first] = Stream.GetCurrentBitNo();
+ Record.push_back(MI->getDefinitionLoc().getRawEncoding());
+ Record.push_back(MI->isUsed());
+
+ unsigned Code;
+ if (MI->isObjectLike()) {
+ Code = pch::PP_MACRO_OBJECT_LIKE;
+ } else {
+ Code = pch::PP_MACRO_FUNCTION_LIKE;
+
+ Record.push_back(MI->isC99Varargs());
+ Record.push_back(MI->isGNUVarargs());
+ Record.push_back(MI->getNumArgs());
+ for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
+ I != E; ++I)
+ AddIdentifierRef(*I, Record);
+ }
+ Stream.EmitRecord(Code, Record);
+ Record.clear();
+
+ // Emit the tokens array.
+ for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) {
+ // Note that we know that the preprocessor does not have any annotation
+ // tokens in it because they are created by the parser, and thus can't be
+ // in a macro definition.
+ const Token &Tok = MI->getReplacementToken(TokNo);
+
+ Record.push_back(Tok.getLocation().getRawEncoding());
+ Record.push_back(Tok.getLength());
+
+ // FIXME: When reading literal tokens, reconstruct the literal pointer if
+ // it is needed.
+ AddIdentifierRef(Tok.getIdentifierInfo(), Record);
+
+ // FIXME: Should translate token kind to a stable encoding.
+ Record.push_back(Tok.getKind());
+ // FIXME: Should translate token flags to a stable encoding.
+ Record.push_back(Tok.getFlags());
+
+ Stream.EmitRecord(pch::PP_TOKEN, Record);
+ Record.clear();
+ }
+ ++NumMacros;
+ }
+ Stream.ExitBlock();
+}
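+
+// As an illustration (hypothetical macro, not taken from any real header),
+// a definition such as
+//
+//   #define MIN(a, b) ((a) < (b) ? (a) : (b))
+//
+// is emitted as one PP_MACRO_FUNCTION_LIKE record (identifier ref,
+// definition location, isUsed, isC99Varargs, isGNUVarargs, argument count,
+// argument identifier refs) followed by one PP_TOKEN record per replacement
+// token (location, length, identifier ref, kind, flags).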
+
+//===----------------------------------------------------------------------===//
+// Type Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the representation of a type to the PCH stream.
+void PCHWriter::WriteType(const Type *T) {
+ pch::TypeID &ID = TypeIDs[T];
+ if (ID == 0) // we haven't seen this type before.
+ ID = NextTypeID++;
+
+ // Record the offset for this type.
+ if (TypeOffsets.size() == ID - pch::NUM_PREDEF_TYPE_IDS)
+ TypeOffsets.push_back(Stream.GetCurrentBitNo());
+ else if (TypeOffsets.size() < ID - pch::NUM_PREDEF_TYPE_IDS) {
+ TypeOffsets.resize(ID + 1 - pch::NUM_PREDEF_TYPE_IDS);
+ TypeOffsets[ID - pch::NUM_PREDEF_TYPE_IDS] = Stream.GetCurrentBitNo();
+ }
+
+ RecordData Record;
+
+ // Emit the type's representation.
+ PCHTypeWriter W(*this, Record);
+ switch (T->getTypeClass()) {
+ // For all of the concrete, non-dependent types, call the
+ // appropriate visitor function.
+#define TYPE(Class, Base) \
+ case Type::Class: W.Visit##Class##Type(cast<Class##Type>(T)); break;
+#define ABSTRACT_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+
+ // For all of the dependent type nodes (which only occur in C++
+ // templates), produce an error.
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ assert(false && "Cannot serialize dependent type nodes");
+ break;
+ }
+
+ // Emit the serialized record.
+ Stream.EmitRecord(W.Code, Record);
+
+ // Flush any expressions that were written as part of this type.
+ FlushStmts();
+}
+
+/// \brief Write a block containing all of the types.
+void PCHWriter::WriteTypesBlock(ASTContext &Context) {
+ // Enter the types block.
+ Stream.EnterSubblock(pch::TYPES_BLOCK_ID, 2);
+
+ // Emit all of the types that need to be emitted (so far).
+ while (!TypesToEmit.empty()) {
+ const Type *T = TypesToEmit.front();
+ TypesToEmit.pop();
+ assert(!isa<BuiltinType>(T) && "Built-in types are not serialized");
+ WriteType(T);
+ }
+
+ // Exit the types block
+ Stream.ExitBlock();
+}
+
+//===----------------------------------------------------------------------===//
+// Declaration Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the block containing all of the declaration IDs
+/// lexically declared within the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_LEXICAL block within the
+/// bitstream, or 0 if no block was written.
+uint64_t PCHWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
+ DeclContext *DC) {
+ if (DC->decls_empty(Context))
+ return 0;
+
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ RecordData Record;
+ for (DeclContext::decl_iterator D = DC->decls_begin(Context),
+ DEnd = DC->decls_end(Context);
+ D != DEnd; ++D)
+ AddDeclRef(*D, Record);
+
+ ++NumLexicalDeclContexts;
+ Stream.EmitRecord(pch::DECL_CONTEXT_LEXICAL, Record);
+ return Offset;
+}
+
+/// \brief Write the block containing all of the declaration IDs
+/// visible from the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_VISIBLE block within the
+/// bitstream, or 0 if no block was written.
+uint64_t PCHWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
+ DeclContext *DC) {
+ if (DC->getPrimaryContext() != DC)
+ return 0;
+
+ // Since there is no name lookup into functions or methods, and we
+ // perform name lookup for the translation unit via the
+ // IdentifierInfo chains, don't bother to build a
+ // visible-declarations table for these entities.
+ if (DC->isFunctionOrMethod() || DC->isTranslationUnit())
+ return 0;
+
+ // Force the DeclContext to build its name-lookup table.
+ DC->lookup(Context, DeclarationName());
+
+ // Serialize the contents of the mapping used for lookup. Note that,
+ // although we have two very different code paths, the serialized
+ // representation is the same for both cases: a declaration name,
+ // followed by a size, followed by references to the visible
+ // declarations that have that name.
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ RecordData Record;
+ StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
+ if (!Map)
+ return 0;
+
+ for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
+ D != DEnd; ++D) {
+ AddDeclarationName(D->first, Record);
+ DeclContext::lookup_result Result = D->second.getLookupResult(Context);
+ Record.push_back(Result.second - Result.first);
+ for(; Result.first != Result.second; ++Result.first)
+ AddDeclRef(*Result.first, Record);
+ }
+
+ if (Record.size() == 0)
+ return 0;
+
+ Stream.EmitRecord(pch::DECL_CONTEXT_VISIBLE, Record);
+ ++NumVisibleDeclContexts;
+ return Offset;
+}
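+
+// For example (hypothetical declarations), a context containing a function
+// "f" and a variable "x" would yield a DECL_CONTEXT_VISIBLE record of the
+// form: name("f"), 1, declID(f), name("x"), 1, declID(x) -- each name is
+// followed by the number of visible declarations with that name and then
+// their declaration IDs.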
+
+//===----------------------------------------------------------------------===//
+// Global Method Pool and Selector Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table used in the method pool.
+class VISIBILITY_HIDDEN PCHMethodPoolTrait {
+ PCHWriter &Writer;
+
+public:
+ typedef Selector key_type;
+ typedef key_type key_type_ref;
+
+ typedef std::pair<ObjCMethodList, ObjCMethodList> data_type;
+ typedef const data_type& data_type_ref;
+
+ explicit PCHMethodPoolTrait(PCHWriter &Writer) : Writer(Writer) { }
+
+ static unsigned ComputeHash(Selector Sel) {
+ unsigned N = Sel.getNumArgs();
+ if (N == 0)
+ ++N;
+ unsigned R = 5381;
+ for (unsigned I = 0; I != N; ++I)
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
+ R = clang::BernsteinHashPartial(II->getName(), II->getLength(), R);
+ return R;
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, Selector Sel,
+ data_type_ref Methods) {
+ unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
+ clang::io::Emit16(Out, KeyLen);
+ unsigned DataLen = 2 + 2; // 2 bytes for each of the method counts
+ for (const ObjCMethodList *Method = &Methods.first; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ DataLen += 4;
+ for (const ObjCMethodList *Method = &Methods.second; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ DataLen += 4;
+ clang::io::Emit16(Out, DataLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(llvm::raw_ostream& Out, Selector Sel, unsigned) {
+ uint64_t Start = Out.tell();
+ assert((Start >> 32) == 0 && "Selector key offset too large");
+ Writer.SetSelectorOffset(Sel, Start);
+ unsigned N = Sel.getNumArgs();
+ clang::io::Emit16(Out, N);
+ if (N == 0)
+ N = 1;
+ for (unsigned I = 0; I != N; ++I)
+ clang::io::Emit32(Out,
+ Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
+ }
+
+ void EmitData(llvm::raw_ostream& Out, key_type_ref,
+ data_type_ref Methods, unsigned DataLen) {
+ uint64_t Start = Out.tell(); (void)Start;
+ unsigned NumInstanceMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.first; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ ++NumInstanceMethods;
+
+ unsigned NumFactoryMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.second; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ ++NumFactoryMethods;
+
+ clang::io::Emit16(Out, NumInstanceMethods);
+ clang::io::Emit16(Out, NumFactoryMethods);
+ for (const ObjCMethodList *Method = &Methods.first; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+ for (const ObjCMethodList *Method = &Methods.second; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+
+ assert(Out.tell() - Start == DataLen && "Data length is wrong");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the method pool into the PCH file.
+///
+/// The method pool contains both instance and factory methods, stored
+/// in an on-disk hash table indexed by the selector.
+void PCHWriter::WriteMethodPool(Sema &SemaRef) {
+ using namespace llvm;
+
+ // Create and write out the blob that contains the instance and
+ // factory method pools.
+ bool Empty = true;
+ {
+ OnDiskChainedHashTableGenerator<PCHMethodPoolTrait> Generator;
+
+ // Create the on-disk hash table representation. Start by
+ // iterating through the instance method pool.
+ PCHMethodPoolTrait::key_type Key;
+ unsigned NumSelectorsInMethodPool = 0;
+ for (llvm::DenseMap<Selector, ObjCMethodList>::iterator
+ Instance = SemaRef.InstanceMethodPool.begin(),
+ InstanceEnd = SemaRef.InstanceMethodPool.end();
+ Instance != InstanceEnd; ++Instance) {
+ // Check whether there is a factory method with the same
+ // selector.
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Factory
+ = SemaRef.FactoryMethodPool.find(Instance->first);
+
+ if (Factory == SemaRef.FactoryMethodPool.end())
+ Generator.insert(Instance->first,
+ std::make_pair(Instance->second,
+ ObjCMethodList()));
+ else
+ Generator.insert(Instance->first,
+ std::make_pair(Instance->second, Factory->second));
+
+ ++NumSelectorsInMethodPool;
+ Empty = false;
+ }
+
+ // Now iterate through the factory method pool, to pick up any
+ // selectors that weren't already in the instance method pool.
+ for (llvm::DenseMap<Selector, ObjCMethodList>::iterator
+ Factory = SemaRef.FactoryMethodPool.begin(),
+ FactoryEnd = SemaRef.FactoryMethodPool.end();
+ Factory != FactoryEnd; ++Factory) {
+ // Check whether there is an instance method with the same
+ // selector. If so, there is no work to do here.
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Instance
+ = SemaRef.InstanceMethodPool.find(Factory->first);
+
+ if (Instance == SemaRef.InstanceMethodPool.end()) {
+ Generator.insert(Factory->first,
+ std::make_pair(ObjCMethodList(), Factory->second));
+ ++NumSelectorsInMethodPool;
+ }
+
+ Empty = false;
+ }
+
+ if (Empty && SelectorOffsets.empty())
+ return;
+
+ // Create the on-disk hash table in a buffer.
+ llvm::SmallVector<char, 4096> MethodPool;
+ uint32_t BucketOffset;
+ SelectorOffsets.resize(SelVector.size());
+ {
+ PCHMethodPoolTrait Trait(*this);
+ llvm::raw_svector_ostream Out(MethodPool);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+
+ // For every selector that we have seen but which was not
+ // written into the hash table, write the selector itself and
+ // record its offset.
+ for (unsigned I = 0, N = SelVector.size(); I != N; ++I)
+ if (SelectorOffsets[I] == 0)
+ Trait.EmitKey(Out, SelVector[I], 0);
+ }
+
+ // Create a blob abbreviation
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::METHOD_POOL));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned MethodPoolAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the method pool
+ RecordData Record;
+ Record.push_back(pch::METHOD_POOL);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumSelectorsInMethodPool);
+ Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record,
+ &MethodPool.front(),
+ MethodPool.size());
+
+ // Create a blob abbreviation for the selector table offsets.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::SELECTOR_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // index
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned SelectorOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the selector offsets table.
+ Record.clear();
+ Record.push_back(pch::SELECTOR_OFFSETS);
+ Record.push_back(SelectorOffsets.size());
+ Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record,
+ (const char *)&SelectorOffsets.front(),
+ SelectorOffsets.size() * 4);
+ }
+}
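+
+// A sketch of what PCHMethodPoolTrait emits per selector (derived from the
+// trait above):
+//
+//   key:  uint16 number of selector arguments (a nullary selector still
+//         writes one slot), then one uint32 identifier ID per slot
+//   data: uint16 instance-method count, uint16 factory-method count, then
+//         one uint32 DeclID per instance method and per factory method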
+
+//===----------------------------------------------------------------------===//
+// Identifier Table Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN PCHIdentifierTableTrait {
+ PCHWriter &Writer;
+ Preprocessor &PP;
+
+ /// \brief Determines whether this is an "interesting" identifier
+ /// that needs a full IdentifierInfo structure written into the hash
+ /// table.
+ static bool isInterestingIdentifier(const IdentifierInfo *II) {
+ return II->isPoisoned() ||
+ II->isExtensionToken() ||
+ II->hasMacroDefinition() ||
+ II->getObjCOrBuiltinID() ||
+ II->getFETokenInfo<void>();
+ }
+
+public:
+ typedef const IdentifierInfo* key_type;
+ typedef key_type key_type_ref;
+
+ typedef pch::IdentID data_type;
+ typedef data_type data_type_ref;
+
+ PCHIdentifierTableTrait(PCHWriter &Writer, Preprocessor &PP)
+ : Writer(Writer), PP(PP) { }
+
+ static unsigned ComputeHash(const IdentifierInfo* II) {
+ return clang::BernsteinHash(II->getName());
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(llvm::raw_ostream& Out, const IdentifierInfo* II,
+ pch::IdentID ID) {
+ unsigned KeyLen = strlen(II->getName()) + 1;
+ unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
+ if (isInterestingIdentifier(II)) {
+ DataLen += 2; // 2 bytes for builtin ID, flags
+ if (II->hasMacroDefinition() &&
+ !PP.getMacroInfo(const_cast<IdentifierInfo *>(II))->isBuiltinMacro())
+ DataLen += 4;
+ for (IdentifierResolver::iterator D = IdentifierResolver::begin(II),
+ DEnd = IdentifierResolver::end();
+ D != DEnd; ++D)
+ DataLen += sizeof(pch::DeclID);
+ }
+ clang::io::Emit16(Out, DataLen);
+ // We emit the key length after the data length so that every
+ // string is preceded by a 16-bit length. This matches the PTH
+ // format for storing identifiers.
+ clang::io::Emit16(Out, KeyLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(llvm::raw_ostream& Out, const IdentifierInfo* II,
+ unsigned KeyLen) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ Writer.SetIdentifierOffset(II, Out.tell());
+ Out.write(II->getName(), KeyLen);
+ }
+
+ void EmitData(llvm::raw_ostream& Out, const IdentifierInfo* II,
+ pch::IdentID ID, unsigned) {
+ if (!isInterestingIdentifier(II)) {
+ clang::io::Emit32(Out, ID << 1);
+ return;
+ }
+
+ clang::io::Emit32(Out, (ID << 1) | 0x01);
+ uint32_t Bits = 0;
+ bool hasMacroDefinition =
+ II->hasMacroDefinition() &&
+ !PP.getMacroInfo(const_cast<IdentifierInfo *>(II))->isBuiltinMacro();
+ Bits = (uint32_t)II->getObjCOrBuiltinID();
+ Bits = (Bits << 1) | hasMacroDefinition;
+ Bits = (Bits << 1) | II->isExtensionToken();
+ Bits = (Bits << 1) | II->isPoisoned();
+ Bits = (Bits << 1) | II->isCPlusPlusOperatorKeyword();
+ clang::io::Emit16(Out, Bits);
+
+ if (hasMacroDefinition)
+ clang::io::Emit32(Out, Writer.getMacroOffset(II));
+
+ // Emit the declaration IDs in reverse order, because the
+ // IdentifierResolver provides the declarations as they would be
+ // visible (e.g., the function "stat" would come before the struct
+ // "stat"), but IdentifierResolver::AddDeclToIdentifierChain()
+ // adds declarations to the end of the list (so we need to see the
+ // struct "status" before the function "status").
+ llvm::SmallVector<Decl *, 16> Decls(IdentifierResolver::begin(II),
+ IdentifierResolver::end());
+ for (llvm::SmallVector<Decl *, 16>::reverse_iterator D = Decls.rbegin(),
+ DEnd = Decls.rend();
+ D != DEnd; ++D)
+ clang::io::Emit32(Out, Writer.getDeclID(*D));
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the identifier table into the PCH file.
+///
+/// The identifier table consists of a blob containing string data
+/// (the actual identifiers themselves) and a separate "offsets" index
+/// that maps identifier IDs to locations within the blob.
+void PCHWriter::WriteIdentifierTable(Preprocessor &PP) {
+ using namespace llvm;
+
+ // Create and write out the blob that contains the identifier
+ // strings.
+ {
+ OnDiskChainedHashTableGenerator<PCHIdentifierTableTrait> Generator;
+
+ // Look for any identifiers that were named while processing the
+ // headers, but are otherwise not needed. We add these to the hash
+ // table to enable checking of the predefines buffer in the case
+ // where the user adds new macro definitions when building the PCH
+ // file.
+ for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
+ IDEnd = PP.getIdentifierTable().end();
+ ID != IDEnd; ++ID)
+ getIdentifierRef(ID->second);
+
+ // Create the on-disk hash table representation.
+ IdentifierOffsets.resize(IdentifierIDs.size());
+ for (llvm::DenseMap<const IdentifierInfo *, pch::IdentID>::iterator
+ ID = IdentifierIDs.begin(), IDEnd = IdentifierIDs.end();
+ ID != IDEnd; ++ID) {
+ assert(ID->first && "NULL identifier in identifier table");
+ Generator.insert(ID->first, ID->second);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ llvm::SmallVector<char, 4096> IdentifierTable;
+ uint32_t BucketOffset;
+ {
+ PCHIdentifierTableTrait Trait(*this, PP);
+ llvm::raw_svector_ostream Out(IdentifierTable);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::IDENTIFIER_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the identifier table
+ RecordData Record;
+ Record.push_back(pch::IDENTIFIER_TABLE);
+ Record.push_back(BucketOffset);
+ Stream.EmitRecordWithBlob(IDTableAbbrev, Record,
+ &IdentifierTable.front(),
+ IdentifierTable.size());
+ }
+
+ // Write the offsets table for identifier IDs.
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::IDENTIFIER_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of identifiers
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IdentifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(pch::IDENTIFIER_OFFSET);
+ Record.push_back(IdentifierOffsets.size());
+ Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record,
+ (const char *)&IdentifierOffsets.front(),
+ IdentifierOffsets.size() * sizeof(uint32_t));
+}
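+
+// A sketch of the per-identifier data emitted by PCHIdentifierTableTrait
+// (derived from the trait above):
+//
+//   "boring" identifier:      uint32 (ID << 1)
+//   "interesting" identifier: uint32 (ID << 1) | 1, then a uint16 packing
+//                             the ObjC/builtin ID and the macro, extension,
+//                             poisoned, and C++-operator-keyword flags, an
+//                             optional uint32 macro offset, and the uint32
+//                             DeclIDs bound to the identifier (in reverse
+//                             visibility order)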
+
+//===----------------------------------------------------------------------===//
+// General Serialization Routines
+//===----------------------------------------------------------------------===//
+
+/// \brief Write a record containing the given attributes.
+void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
+ RecordData Record;
+ for (; Attr; Attr = Attr->getNext()) {
+ Record.push_back(Attr->getKind()); // FIXME: stable encoding
+ Record.push_back(Attr->isInherited());
+ switch (Attr->getKind()) {
+ case Attr::Alias:
+ AddString(cast<AliasAttr>(Attr)->getAliasee(), Record);
+ break;
+
+ case Attr::Aligned:
+ Record.push_back(cast<AlignedAttr>(Attr)->getAlignment());
+ break;
+
+ case Attr::AlwaysInline:
+ break;
+
+ case Attr::AnalyzerNoReturn:
+ break;
+
+ case Attr::Annotate:
+ AddString(cast<AnnotateAttr>(Attr)->getAnnotation(), Record);
+ break;
+
+ case Attr::AsmLabel:
+ AddString(cast<AsmLabelAttr>(Attr)->getLabel(), Record);
+ break;
+
+ case Attr::Blocks:
+ Record.push_back(cast<BlocksAttr>(Attr)->getType()); // FIXME: stable
+ break;
+
+ case Attr::Cleanup:
+ AddDeclRef(cast<CleanupAttr>(Attr)->getFunctionDecl(), Record);
+ break;
+
+ case Attr::Const:
+ break;
+
+ case Attr::Constructor:
+ Record.push_back(cast<ConstructorAttr>(Attr)->getPriority());
+ break;
+
+ case Attr::DLLExport:
+ case Attr::DLLImport:
+ case Attr::Deprecated:
+ break;
+
+ case Attr::Destructor:
+ Record.push_back(cast<DestructorAttr>(Attr)->getPriority());
+ break;
+
+ case Attr::FastCall:
+ break;
+
+ case Attr::Format: {
+ const FormatAttr *Format = cast<FormatAttr>(Attr);
+ AddString(Format->getType(), Record);
+ Record.push_back(Format->getFormatIdx());
+ Record.push_back(Format->getFirstArg());
+ break;
+ }
+
+ case Attr::FormatArg: {
+ const FormatArgAttr *Format = cast<FormatArgAttr>(Attr);
+ Record.push_back(Format->getFormatIdx());
+ break;
+ }
+
+ case Attr::Sentinel : {
+ const SentinelAttr *Sentinel = cast<SentinelAttr>(Attr);
+ Record.push_back(Sentinel->getSentinel());
+ Record.push_back(Sentinel->getNullPos());
+ break;
+ }
+
+ case Attr::GNUInline:
+ case Attr::IBOutletKind:
+ case Attr::NoReturn:
+ case Attr::NoThrow:
+ case Attr::Nodebug:
+ case Attr::Noinline:
+ break;
+
+ case Attr::NonNull: {
+ const NonNullAttr *NonNull = cast<NonNullAttr>(Attr);
+ Record.push_back(NonNull->size());
+ Record.insert(Record.end(), NonNull->begin(), NonNull->end());
+ break;
+ }
+
+ case Attr::ObjCException:
+ case Attr::ObjCNSObject:
+ case Attr::CFReturnsRetained:
+ case Attr::NSReturnsRetained:
+ case Attr::Overloadable:
+ break;
+
+ case Attr::Packed:
+ Record.push_back(cast<PackedAttr>(Attr)->getAlignment());
+ break;
+
+ case Attr::Pure:
+ break;
+
+ case Attr::Regparm:
+ Record.push_back(cast<RegparmAttr>(Attr)->getNumParams());
+ break;
+
+ case Attr::Section:
+ AddString(cast<SectionAttr>(Attr)->getName(), Record);
+ break;
+
+ case Attr::StdCall:
+ case Attr::TransparentUnion:
+ case Attr::Unavailable:
+ case Attr::Unused:
+ case Attr::Used:
+ break;
+
+ case Attr::Visibility:
+ // FIXME: stable encoding
+ Record.push_back(cast<VisibilityAttr>(Attr)->getVisibility());
+ break;
+
+ case Attr::WarnUnusedResult:
+ case Attr::Weak:
+ case Attr::WeakImport:
+ break;
+ }
+ }
+
+ Stream.EmitRecord(pch::DECL_ATTR, Record);
+}
+
+void PCHWriter::AddString(const std::string &Str, RecordData &Record) {
+ Record.push_back(Str.size());
+ Record.insert(Record.end(), Str.begin(), Str.end());
+}
+
+/// \brief Note that the identifier II occurs at the given offset
+/// within the identifier table.
+void PCHWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
+ IdentifierOffsets[IdentifierIDs[II] - 1] = Offset;
+}
+
+/// \brief Note that the selector Sel occurs at the given offset
+/// within the method pool/selector table.
+void PCHWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
+ unsigned ID = SelectorIDs[Sel];
+ assert(ID && "Unknown selector");
+ SelectorOffsets[ID - 1] = Offset;
+}
+
+PCHWriter::PCHWriter(llvm::BitstreamWriter &Stream)
+ : Stream(Stream), NextTypeID(pch::NUM_PREDEF_TYPE_IDS),
+ NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
+ NumVisibleDeclContexts(0) { }
+
+void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls) {
+ using namespace llvm;
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+
+ // Emit the file header.
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'P', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'H', 8);
+
+ WriteBlockInfoBlock();
+
+ // The translation unit is the first declaration we'll emit.
+ DeclIDs[Context.getTranslationUnitDecl()] = 1;
+ DeclsToEmit.push(Context.getTranslationUnitDecl());
+
+ // Make sure that we emit IdentifierInfos (and any attached
+ // declarations) for builtins.
+ {
+ IdentifierTable &Table = PP.getIdentifierTable();
+ llvm::SmallVector<const char *, 32> BuiltinNames;
+ Context.BuiltinInfo.GetBuiltinNames(BuiltinNames,
+ Context.getLangOptions().NoBuiltin);
+ for (unsigned I = 0, N = BuiltinNames.size(); I != N; ++I)
+ getIdentifierRef(&Table.get(BuiltinNames[I]));
+ }
+
+ // Build a record containing all of the tentative definitions in
+ // this header file. Generally, this record will be empty.
+ RecordData TentativeDefinitions;
+ for (llvm::DenseMap<DeclarationName, VarDecl *>::iterator
+ TD = SemaRef.TentativeDefinitions.begin(),
+ TDEnd = SemaRef.TentativeDefinitions.end();
+ TD != TDEnd; ++TD)
+ AddDeclRef(TD->second, TentativeDefinitions);
+
+ // Build a record containing all of the locally-scoped external
+ // declarations in this header file. Generally, this record will be
+ // empty.
+ RecordData LocallyScopedExternalDecls;
+ for (llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
+ TD = SemaRef.LocallyScopedExternalDecls.begin(),
+ TDEnd = SemaRef.LocallyScopedExternalDecls.end();
+ TD != TDEnd; ++TD)
+ AddDeclRef(TD->second, LocallyScopedExternalDecls);
+
+ // Build a record containing all of the ext_vector declarations.
+ RecordData ExtVectorDecls;
+ for (unsigned I = 0, N = SemaRef.ExtVectorDecls.size(); I != N; ++I)
+ AddDeclRef(SemaRef.ExtVectorDecls[I], ExtVectorDecls);
+
+ // Build a record containing all of the Objective-C category
+ // implementations.
+ RecordData ObjCCategoryImpls;
+ for (unsigned I = 0, N = SemaRef.ObjCCategoryImpls.size(); I != N; ++I)
+ AddDeclRef(SemaRef.ObjCCategoryImpls[I], ObjCCategoryImpls);
+
+ // Write the remaining PCH contents.
+ RecordData Record;
+ Stream.EnterSubblock(pch::PCH_BLOCK_ID, 4);
+ WriteMetadata(Context);
+ WriteLanguageOptions(Context.getLangOptions());
+ if (StatCalls)
+ WriteStatCache(*StatCalls);
+ WriteSourceManagerBlock(Context.getSourceManager(), PP);
+ WritePreprocessor(PP);
+
+ // Keep writing types and declarations until all types and
+ // declarations have been written.
+ do {
+ if (!DeclsToEmit.empty())
+ WriteDeclsBlock(Context);
+ if (!TypesToEmit.empty())
+ WriteTypesBlock(Context);
+ } while (!(DeclsToEmit.empty() && TypesToEmit.empty()));
+
+ WriteMethodPool(SemaRef);
+ WriteIdentifierTable(PP);
+
+ // Write the type offsets array
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::TYPE_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of types
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // types block
+ unsigned TypeOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ Record.clear();
+ Record.push_back(pch::TYPE_OFFSET);
+ Record.push_back(TypeOffsets.size());
+ Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record,
+ (const char *)&TypeOffsets.front(),
+ TypeOffsets.size() * sizeof(TypeOffsets[0]));
+
+ // Write the declaration offsets array
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(pch::DECL_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of declarations
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // declarations block
+ unsigned DeclOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ Record.clear();
+ Record.push_back(pch::DECL_OFFSET);
+ Record.push_back(DeclOffsets.size());
+ Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record,
+ (const char *)&DeclOffsets.front(),
+ DeclOffsets.size() * sizeof(DeclOffsets[0]));
+
+ // Write the record of special types.
+ Record.clear();
+ AddTypeRef(Context.getBuiltinVaListType(), Record);
+ AddTypeRef(Context.getObjCIdType(), Record);
+ AddTypeRef(Context.getObjCSelType(), Record);
+ AddTypeRef(Context.getObjCProtoType(), Record);
+ AddTypeRef(Context.getObjCClassType(), Record);
+ AddTypeRef(Context.getRawCFConstantStringType(), Record);
+ AddTypeRef(Context.getRawObjCFastEnumerationStateType(), Record);
+ Stream.EmitRecord(pch::SPECIAL_TYPES, Record);
+
+ // Write the record containing external, unnamed definitions.
+ if (!ExternalDefinitions.empty())
+ Stream.EmitRecord(pch::EXTERNAL_DEFINITIONS, ExternalDefinitions);
+
+ // Write the record containing tentative definitions.
+ if (!TentativeDefinitions.empty())
+ Stream.EmitRecord(pch::TENTATIVE_DEFINITIONS, TentativeDefinitions);
+
+ // Write the record containing locally-scoped external definitions.
+ if (!LocallyScopedExternalDecls.empty())
+ Stream.EmitRecord(pch::LOCALLY_SCOPED_EXTERNAL_DECLS,
+ LocallyScopedExternalDecls);
+
+ // Write the record containing ext_vector type names.
+ if (!ExtVectorDecls.empty())
+ Stream.EmitRecord(pch::EXT_VECTOR_DECLS, ExtVectorDecls);
+
+ // Write the record containing Objective-C category implementations.
+ if (!ObjCCategoryImpls.empty())
+ Stream.EmitRecord(pch::OBJC_CATEGORY_IMPLEMENTATIONS, ObjCCategoryImpls);
+
+ // Some simple statistics
+ Record.clear();
+ Record.push_back(NumStatements);
+ Record.push_back(NumMacros);
+ Record.push_back(NumLexicalDeclContexts);
+ Record.push_back(NumVisibleDeclContexts);
+ Stream.EmitRecord(pch::STATISTICS, Record);
+ Stream.ExitBlock();
+}
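+
+// Overall, the PCH file written above consists of the 'CPCH' signature, the
+// block-info block, and a single PCH block containing (in order): metadata,
+// language options, the optional stat cache, the source manager and
+// preprocessor blocks, interleaved declaration and type blocks, the method
+// pool, the identifier table, the type and declaration offset arrays, the
+// special-types record, several optional records (external, tentative, and
+// locally-scoped external definitions, etc.), and a statistics record.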
+
+void PCHWriter::AddSourceLocation(SourceLocation Loc, RecordData &Record) {
+ Record.push_back(Loc.getRawEncoding());
+}
+
+void PCHWriter::AddAPInt(const llvm::APInt &Value, RecordData &Record) {
+ Record.push_back(Value.getBitWidth());
+ unsigned N = Value.getNumWords();
+ const uint64_t* Words = Value.getRawData();
+ for (unsigned I = 0; I != N; ++I)
+ Record.push_back(Words[I]);
+}
+
+void PCHWriter::AddAPSInt(const llvm::APSInt &Value, RecordData &Record) {
+ Record.push_back(Value.isUnsigned());
+ AddAPInt(Value, Record);
+}
+
+void PCHWriter::AddAPFloat(const llvm::APFloat &Value, RecordData &Record) {
+ AddAPInt(Value.bitcastToAPInt(), Record);
+}
+
+void PCHWriter::AddIdentifierRef(const IdentifierInfo *II, RecordData &Record) {
+ Record.push_back(getIdentifierRef(II));
+}
+
+pch::IdentID PCHWriter::getIdentifierRef(const IdentifierInfo *II) {
+ if (II == 0)
+ return 0;
+
+ pch::IdentID &ID = IdentifierIDs[II];
+ if (ID == 0)
+ ID = IdentifierIDs.size();
+ return ID;
+}
+
+void PCHWriter::AddSelectorRef(const Selector SelRef, RecordData &Record) {
+ if (SelRef.getAsOpaquePtr() == 0) {
+ Record.push_back(0);
+ return;
+ }
+
+ pch::SelectorID &SID = SelectorIDs[SelRef];
+ if (SID == 0) {
+ SID = SelectorIDs.size();
+ SelVector.push_back(SelRef);
+ }
+ Record.push_back(SID);
+}
+
+void PCHWriter::AddTypeRef(QualType T, RecordData &Record) {
+ if (T.isNull()) {
+ Record.push_back(pch::PREDEF_TYPE_NULL_ID);
+ return;
+ }
+
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr())) {
+ pch::TypeID ID = 0;
+ switch (BT->getKind()) {
+ case BuiltinType::Void: ID = pch::PREDEF_TYPE_VOID_ID; break;
+ case BuiltinType::Bool: ID = pch::PREDEF_TYPE_BOOL_ID; break;
+ case BuiltinType::Char_U: ID = pch::PREDEF_TYPE_CHAR_U_ID; break;
+ case BuiltinType::UChar: ID = pch::PREDEF_TYPE_UCHAR_ID; break;
+ case BuiltinType::UShort: ID = pch::PREDEF_TYPE_USHORT_ID; break;
+ case BuiltinType::UInt: ID = pch::PREDEF_TYPE_UINT_ID; break;
+ case BuiltinType::ULong: ID = pch::PREDEF_TYPE_ULONG_ID; break;
+ case BuiltinType::ULongLong: ID = pch::PREDEF_TYPE_ULONGLONG_ID; break;
+ case BuiltinType::UInt128: ID = pch::PREDEF_TYPE_UINT128_ID; break;
+ case BuiltinType::Char_S: ID = pch::PREDEF_TYPE_CHAR_S_ID; break;
+ case BuiltinType::SChar: ID = pch::PREDEF_TYPE_SCHAR_ID; break;
+ case BuiltinType::WChar: ID = pch::PREDEF_TYPE_WCHAR_ID; break;
+ case BuiltinType::Short: ID = pch::PREDEF_TYPE_SHORT_ID; break;
+ case BuiltinType::Int: ID = pch::PREDEF_TYPE_INT_ID; break;
+ case BuiltinType::Long: ID = pch::PREDEF_TYPE_LONG_ID; break;
+ case BuiltinType::LongLong: ID = pch::PREDEF_TYPE_LONGLONG_ID; break;
+ case BuiltinType::Int128: ID = pch::PREDEF_TYPE_INT128_ID; break;
+ case BuiltinType::Float: ID = pch::PREDEF_TYPE_FLOAT_ID; break;
+ case BuiltinType::Double: ID = pch::PREDEF_TYPE_DOUBLE_ID; break;
+ case BuiltinType::LongDouble: ID = pch::PREDEF_TYPE_LONGDOUBLE_ID; break;
+ case BuiltinType::NullPtr: ID = pch::PREDEF_TYPE_NULLPTR_ID; break;
+ case BuiltinType::Overload: ID = pch::PREDEF_TYPE_OVERLOAD_ID; break;
+ case BuiltinType::Dependent: ID = pch::PREDEF_TYPE_DEPENDENT_ID; break;
+ }
+
+ Record.push_back((ID << 3) | T.getCVRQualifiers());
+ return;
+ }
+
+ pch::TypeID &ID = TypeIDs[T.getTypePtr()];
+ if (ID == 0) {
+ // We haven't seen this type before. Assign it a new ID and put it
+ // into the queue of types to emit.
+ ID = NextTypeID++;
+ TypesToEmit.push(T.getTypePtr());
+ }
+
+ // Encode the type qualifiers in the type reference.
+ Record.push_back((ID << 3) | T.getCVRQualifiers());
+}
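+
+// For example, a reference to 'const int' is encoded as
+// (pch::PREDEF_TYPE_INT_ID << 3) | Const, where the low three bits carry
+// the CVR qualifiers (assuming the usual QualType encoding with
+// Const == 0x1).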
+
+void PCHWriter::AddDeclRef(const Decl *D, RecordData &Record) {
+ if (D == 0) {
+ Record.push_back(0);
+ return;
+ }
+
+ pch::DeclID &ID = DeclIDs[D];
+ if (ID == 0) {
+ // We haven't seen this declaration before. Give it a new ID and
+ // enqueue it in the list of declarations to emit.
+ ID = DeclIDs.size();
+ DeclsToEmit.push(const_cast<Decl *>(D));
+ }
+
+ Record.push_back(ID);
+}
+
+pch::DeclID PCHWriter::getDeclID(const Decl *D) {
+ if (D == 0)
+ return 0;
+
+ assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
+ return DeclIDs[D];
+}
+
+void PCHWriter::AddDeclarationName(DeclarationName Name, RecordData &Record) {
+ // FIXME: Emit a stable enum for NameKind. 0 = Identifier etc.
+ Record.push_back(Name.getNameKind());
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ AddIdentifierRef(Name.getAsIdentifierInfo(), Record);
+ break;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ AddSelectorRef(Name.getObjCSelector(), Record);
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeRef(Name.getCXXNameType(), Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ Record.push_back(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ // No extra data to emit
+ break;
+ }
+}
+
diff --git a/lib/Frontend/PCHWriterDecl.cpp b/lib/Frontend/PCHWriterDecl.cpp
new file mode 100644
index 0000000..6734661
--- /dev/null
+++ b/lib/Frontend/PCHWriterDecl.cpp
@@ -0,0 +1,532 @@
+//===--- PCHWriterDecl.cpp - Declaration Serialization --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements serialization for Declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHWriter.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Declaration serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class PCHDeclWriter : public DeclVisitor<PCHDeclWriter, void> {
+
+ PCHWriter &Writer;
+ ASTContext &Context;
+ PCHWriter::RecordData &Record;
+
+ public:
+ pch::DeclCode Code;
+ unsigned AbbrevToUse;
+
+ PCHDeclWriter(PCHWriter &Writer, ASTContext &Context,
+ PCHWriter::RecordData &Record)
+ : Writer(Writer), Context(Context), Record(Record) {
+ }
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitNamedDecl(NamedDecl *D);
+ void VisitTypeDecl(TypeDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTagDecl(TagDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitValueDecl(ValueDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitImplicitParamDecl(ImplicitParamDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitOriginalParmVarDecl(OriginalParmVarDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitBlockDecl(BlockDecl *D);
+ void VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCClassDecl(ObjCClassDecl *D);
+ void VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ };
+}
+
+void PCHDeclWriter::VisitDecl(Decl *D) {
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
+ Writer.AddSourceLocation(D->getLocation(), Record);
+ Record.push_back(D->isInvalidDecl());
+ Record.push_back(D->hasAttrs());
+ Record.push_back(D->isImplicit());
+ Record.push_back(D->getAccess());
+}
+
+void PCHDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ VisitDecl(D);
+ Code = pch::DECL_TRANSLATION_UNIT;
+}
+
+void PCHDeclWriter::VisitNamedDecl(NamedDecl *D) {
+ VisitDecl(D);
+ Writer.AddDeclarationName(D->getDeclName(), Record);
+}
+
+void PCHDeclWriter::VisitTypeDecl(TypeDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+}
+
+void PCHDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
+ VisitTypeDecl(D);
+ Writer.AddTypeRef(D->getUnderlyingType(), Record);
+ Code = pch::DECL_TYPEDEF;
+}
+
+void PCHDeclWriter::VisitTagDecl(TagDecl *D) {
+ VisitTypeDecl(D);
+ Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
+ Record.push_back(D->isDefinition());
+ Writer.AddDeclRef(D->getTypedefForAnonDecl(), Record);
+}
+
+void PCHDeclWriter::VisitEnumDecl(EnumDecl *D) {
+ VisitTagDecl(D);
+ Writer.AddTypeRef(D->getIntegerType(), Record);
+ // FIXME: C++ InstantiatedFrom
+ Code = pch::DECL_ENUM;
+}
+
+void PCHDeclWriter::VisitRecordDecl(RecordDecl *D) {
+ VisitTagDecl(D);
+ Record.push_back(D->hasFlexibleArrayMember());
+ Record.push_back(D->isAnonymousStructOrUnion());
+ Code = pch::DECL_RECORD;
+}
+
+void PCHDeclWriter::VisitValueDecl(ValueDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddTypeRef(D->getType(), Record);
+}
+
+void PCHDeclWriter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getInitExpr()? 1 : 0);
+ if (D->getInitExpr())
+ Writer.AddStmt(D->getInitExpr());
+ Writer.AddAPSInt(D->getInitVal(), Record);
+ Code = pch::DECL_ENUM_CONSTANT;
+}
+
+void PCHDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition())
+ Writer.AddStmt(D->getBody(Context));
+ Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ Record.push_back(D->getStorageClass()); // FIXME: stable encoding
+ Record.push_back(D->isInline());
+ Record.push_back(D->isC99InlineDefinition());
+ Record.push_back(D->isVirtualAsWritten());
+ Record.push_back(D->isPure());
+ Record.push_back(D->hasInheritedPrototype());
+ Record.push_back(D->hasWrittenPrototype());
+ Record.push_back(D->isDeleted());
+ Writer.AddSourceLocation(D->getTypeSpecStartLoc(), Record);
+ // FIXME: C++ TemplateOrInstantiation
+ Record.push_back(D->param_size());
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = pch::DECL_FUNCTION;
+}
+
+void PCHDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ VisitNamedDecl(D);
+ // FIXME: convert to LazyStmtPtr?
+ // Unlike C/C++, method bodies will never be in header files.
+ Record.push_back(D->getBody() != 0);
+ if (D->getBody() != 0) {
+ Writer.AddStmt(D->getBody(Context));
+ Writer.AddDeclRef(D->getSelfDecl(), Record);
+ Writer.AddDeclRef(D->getCmdDecl(), Record);
+ }
+ Record.push_back(D->isInstanceMethod());
+ Record.push_back(D->isVariadic());
+ Record.push_back(D->isSynthesized());
+ // FIXME: stable encoding for @required/@optional
+ Record.push_back(D->getImplementationControl());
+ // FIXME: stable encoding for in/out/inout/bycopy/byref/oneway
+ Record.push_back(D->getObjCDeclQualifier());
+ Writer.AddTypeRef(D->getResultType(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(D->param_size());
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(); P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = pch::DECL_OBJC_METHOD;
+}
+
+void PCHDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getAtEndLoc(), Record);
+ // Abstract class (no need to define a stable pch::DECL code).
+}
+
+void PCHDeclWriter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Record.push_back(D->protocol_size());
+ for (ObjCInterfaceDecl::protocol_iterator P = D->protocol_begin(),
+ PEnd = D->protocol_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Record.push_back(D->ivar_size());
+ for (ObjCInterfaceDecl::ivar_iterator I = D->ivar_begin(),
+ IEnd = D->ivar_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Writer.AddDeclRef(D->getCategoryList(), Record);
+ Record.push_back(D->isForwardDecl());
+ Record.push_back(D->isImplicitInterfaceDecl());
+ Writer.AddSourceLocation(D->getClassLoc(), Record);
+ Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Code = pch::DECL_OBJC_INTERFACE;
+}
+
+void PCHDeclWriter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ VisitFieldDecl(D);
+ // FIXME: stable encoding for @public/@private/@protected/@package
+ Record.push_back(D->getAccessControl());
+ Code = pch::DECL_OBJC_IVAR;
+}
+
+void PCHDeclWriter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ VisitObjCContainerDecl(D);
+ Record.push_back(D->isForwardDecl());
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(D->protocol_size());
+ for (ObjCProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = pch::DECL_OBJC_PROTOCOL;
+}
+
+void PCHDeclWriter::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
+ VisitFieldDecl(D);
+ Code = pch::DECL_OBJC_AT_DEFS_FIELD;
+}
+
+void PCHDeclWriter::VisitObjCClassDecl(ObjCClassDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->size());
+ for (ObjCClassDecl::iterator I = D->begin(), IEnd = D->end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = pch::DECL_OBJC_CLASS;
+}
+
+void PCHDeclWriter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->protocol_size());
+ for (ObjCProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = pch::DECL_OBJC_FORWARD_PROTOCOL;
+}
+
+void PCHDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Record.push_back(D->protocol_size());
+ for (ObjCProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Writer.AddDeclRef(D->getNextClassCategory(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Code = pch::DECL_OBJC_CATEGORY;
+}
+
+void PCHDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Code = pch::DECL_OBJC_COMPATIBLE_ALIAS;
+}
+
+void PCHDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddTypeRef(D->getType(), Record);
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyAttributes());
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyImplementation());
+ Writer.AddDeclarationName(D->getGetterName(), Record);
+ Writer.AddDeclarationName(D->getSetterName(), Record);
+ Writer.AddDeclRef(D->getGetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getSetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Code = pch::DECL_OBJC_PROPERTY;
+}
+
+void PCHDeclWriter::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ // Abstract class (no need to define a stable pch::DECL code).
+}
+
+void PCHDeclWriter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddIdentifierRef(D->getIdentifier(), Record);
+ Code = pch::DECL_OBJC_CATEGORY_IMPL;
+}
+
+void PCHDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Code = pch::DECL_OBJC_IMPLEMENTATION;
+}
+
+void PCHDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddDeclRef(D->getPropertyDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Code = pch::DECL_OBJC_PROPERTY_IMPL;
+}
+
+void PCHDeclWriter::VisitFieldDecl(FieldDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->isMutable());
+ Record.push_back(D->getBitWidth()? 1 : 0);
+ if (D->getBitWidth())
+ Writer.AddStmt(D->getBitWidth());
+ Code = pch::DECL_FIELD;
+}
+
+void PCHDeclWriter::VisitVarDecl(VarDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getStorageClass()); // FIXME: stable encoding
+ Record.push_back(D->isThreadSpecified());
+ Record.push_back(D->hasCXXDirectInitializer());
+ Record.push_back(D->isDeclaredInCondition());
+ Writer.AddDeclRef(D->getPreviousDeclaration(), Record);
+ Writer.AddSourceLocation(D->getTypeSpecStartLoc(), Record);
+ Record.push_back(D->getInit()? 1 : 0);
+ if (D->getInit())
+ Writer.AddStmt(D->getInit());
+ Code = pch::DECL_VAR;
+}
+
+void PCHDeclWriter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ VisitVarDecl(D);
+ Code = pch::DECL_IMPLICIT_PARAM;
+}
+
+void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+ Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding
+ // FIXME: emit default argument (C++)
+ // FIXME: why isn't the "default argument" just stored as the initializer
+ // in VarDecl?
+ Code = pch::DECL_PARM_VAR;
+
+
+ // If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
+ // we dynamically check for the properties that we optimize for, but don't
+ // know are true of all PARM_VAR_DECLs.
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ D->getAccess() == AS_none &&
+ D->getStorageClass() == 0 &&
+ !D->hasCXXDirectInitializer() && // Can params have this ever?
+ D->getObjCDeclQualifier() == 0)
+ AbbrevToUse = Writer.getParmVarDeclAbbrev();
+
+ // Check things we know are true of *every* PARM_VAR_DECL, which is more than
+ // just us assuming it.
+ assert(!D->isInvalidDecl() && "Shouldn't emit invalid decls");
+ assert(!D->isThreadSpecified() && "PARM_VAR_DECL can't be __thread");
+ assert(D->getAccess() == AS_none && "PARM_VAR_DECL can't be public/private");
+ assert(!D->isDeclaredInCondition() && "PARM_VAR_DECL can't be in condition");
+ assert(D->getPreviousDeclaration() == 0 && "PARM_VAR_DECL can't be redecl");
+ assert(D->getInit() == 0 && "PARM_VAR_DECL never has init");
+}
+
+void PCHDeclWriter::VisitOriginalParmVarDecl(OriginalParmVarDecl *D) {
+ VisitParmVarDecl(D);
+ Writer.AddTypeRef(D->getOriginalType(), Record);
+ Code = pch::DECL_ORIGINAL_PARM_VAR;
+ AbbrevToUse = 0;
+}
+
+void PCHDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getAsmString());
+ Code = pch::DECL_FILE_SCOPE_ASM;
+}
+
+void PCHDeclWriter::VisitBlockDecl(BlockDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getBody());
+ Record.push_back(D->param_size());
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = pch::DECL_BLOCK;
+}
+
+/// \brief Emit the DeclContext part of a declaration context decl.
+///
+/// \param LexicalOffset the offset at which the DECL_CONTEXT_LEXICAL
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations stored within this context.
+///
+/// \param VisibleOffset the offset at which the DECL_CONTEXT_VISIBLE
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations visible from this context. Note
+/// that this value will not be emitted for non-primary declaration
+/// contexts.
+void PCHDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset) {
+ Record.push_back(LexicalOffset);
+ Record.push_back(VisibleOffset);
+}
+
+
+//===----------------------------------------------------------------------===//
+// PCHWriter Implementation
+//===----------------------------------------------------------------------===//
+
+void PCHWriter::WriteDeclsBlockAbbrevs() {
+ using namespace llvm;
+ // Abbreviation for DECL_PARM_VAR.
+ BitCodeAbbrev *Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(pch::DECL_PARM_VAR));
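+ // Note: operands added as literal values (e.g. BitCodeAbbrevOp(0)) are stored
+ // once in the abbreviation definition itself and take no bits per record.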
+
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl (!?)
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // VarDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(0)); // isThreadSpecified
+ Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(0)); // isDeclaredInCondition
+ Abv->Add(BitCodeAbbrevOp(0)); // PrevDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeSpecStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInit
+ // ParmVarDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier
+
+ ParmVarDeclAbbrev = Stream.EmitAbbrev(Abv);
+}
+
+/// \brief Write a block containing all of the declarations.
+void PCHWriter::WriteDeclsBlock(ASTContext &Context) {
+ // Enter the declarations block.
+ Stream.EnterSubblock(pch::DECLS_BLOCK_ID, 3);
+
+ // Output the abbreviations that we will use in this block.
+ WriteDeclsBlockAbbrevs();
+
+ // Emit all of the declarations.
+ RecordData Record;
+ PCHDeclWriter W(*this, Context, Record);
+ while (!DeclsToEmit.empty()) {
+ // Pull the next declaration off the queue
+ Decl *D = DeclsToEmit.front();
+ DeclsToEmit.pop();
+
+ // If this declaration is also a DeclContext, write blocks for the
+ // declarations that are lexically stored inside its context and those
+ // declarations that are visible from its context. These blocks
+ // are written before the declaration itself so that we can put
+ // their offsets into the record for the declaration.
+ uint64_t LexicalOffset = 0;
+ uint64_t VisibleOffset = 0;
+ DeclContext *DC = dyn_cast<DeclContext>(D);
+ if (DC) {
+ LexicalOffset = WriteDeclContextLexicalBlock(Context, DC);
+ VisibleOffset = WriteDeclContextVisibleBlock(Context, DC);
+ }
+
+ // Determine the ID for this declaration
+ pch::DeclID &ID = DeclIDs[D];
+ if (ID == 0)
+ ID = DeclIDs.size();
+
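+ // Declaration IDs are assigned starting at 1, so the offsets table is
+ // indexed by ID - 1.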
+ unsigned Index = ID - 1;
+
+ // Record the offset for this declaration
+ if (DeclOffsets.size() == Index)
+ DeclOffsets.push_back(Stream.GetCurrentBitNo());
+ else if (DeclOffsets.size() < Index) {
+ DeclOffsets.resize(Index+1);
+ DeclOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ // Build and emit a record for this declaration
+ Record.clear();
+ W.Code = (pch::DeclCode)0;
+ W.AbbrevToUse = 0;
+ W.Visit(D);
+ if (DC) W.VisitDeclContext(DC, LexicalOffset, VisibleOffset);
+
+ if (!W.Code) {
+ fprintf(stderr, "Cannot serialize declaration of kind %s\n",
+ D->getDeclKindName());
+ assert(false && "Unhandled declaration kind while generating PCH");
+ exit(-1);
+ }
+ Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
+
+ // If the declaration had any attributes, write them now.
+ if (D->hasAttrs())
+ WriteAttributeRecord(D->getAttrs());
+
+ // Flush any expressions that were written as part of this declaration.
+ FlushStmts();
+
+ // Note external declarations so that we can add them to a record
+ // in the PCH file later.
+ if (isa<FileScopeAsmDecl>(D))
+ ExternalDefinitions.push_back(ID);
+ }
+
+ // Exit the declarations block
+ Stream.ExitBlock();
+}
diff --git a/lib/Frontend/PCHWriterStmt.cpp b/lib/Frontend/PCHWriterStmt.cpp
new file mode 100644
index 0000000..b7caee5
--- /dev/null
+++ b/lib/Frontend/PCHWriterStmt.cpp
@@ -0,0 +1,829 @@
+//===--- PCHWriterStmt.cpp - Statement and Expression Serialization -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements serialization for Statements and Expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PCHWriter.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statement/expression serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class PCHStmtWriter : public StmtVisitor<PCHStmtWriter, void> {
+
+ PCHWriter &Writer;
+ PCHWriter::RecordData &Record;
+
+ public:
+ pch::StmtCode Code;
+
+ PCHStmtWriter(PCHWriter &Writer, PCHWriter::RecordData &Record)
+ : Writer(Writer), Record(Record) { }
+
+ void VisitStmt(Stmt *S);
+ void VisitNullStmt(NullStmt *S);
+ void VisitCompoundStmt(CompoundStmt *S);
+ void VisitSwitchCase(SwitchCase *S);
+ void VisitCaseStmt(CaseStmt *S);
+ void VisitDefaultStmt(DefaultStmt *S);
+ void VisitLabelStmt(LabelStmt *S);
+ void VisitIfStmt(IfStmt *S);
+ void VisitSwitchStmt(SwitchStmt *S);
+ void VisitWhileStmt(WhileStmt *S);
+ void VisitDoStmt(DoStmt *S);
+ void VisitForStmt(ForStmt *S);
+ void VisitGotoStmt(GotoStmt *S);
+ void VisitIndirectGotoStmt(IndirectGotoStmt *S);
+ void VisitContinueStmt(ContinueStmt *S);
+ void VisitBreakStmt(BreakStmt *S);
+ void VisitReturnStmt(ReturnStmt *S);
+ void VisitDeclStmt(DeclStmt *S);
+ void VisitAsmStmt(AsmStmt *S);
+ void VisitExpr(Expr *E);
+ void VisitPredefinedExpr(PredefinedExpr *E);
+ void VisitDeclRefExpr(DeclRefExpr *E);
+ void VisitIntegerLiteral(IntegerLiteral *E);
+ void VisitFloatingLiteral(FloatingLiteral *E);
+ void VisitImaginaryLiteral(ImaginaryLiteral *E);
+ void VisitStringLiteral(StringLiteral *E);
+ void VisitCharacterLiteral(CharacterLiteral *E);
+ void VisitParenExpr(ParenExpr *E);
+ void VisitUnaryOperator(UnaryOperator *E);
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ void VisitCallExpr(CallExpr *E);
+ void VisitMemberExpr(MemberExpr *E);
+ void VisitCastExpr(CastExpr *E);
+ void VisitBinaryOperator(BinaryOperator *E);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ void VisitConditionalOperator(ConditionalOperator *E);
+ void VisitImplicitCastExpr(ImplicitCastExpr *E);
+ void VisitExplicitCastExpr(ExplicitCastExpr *E);
+ void VisitCStyleCastExpr(CStyleCastExpr *E);
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *E);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitDesignatedInitExpr(DesignatedInitExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitVAArgExpr(VAArgExpr *E);
+ void VisitAddrLabelExpr(AddrLabelExpr *E);
+ void VisitStmtExpr(StmtExpr *E);
+ void VisitTypesCompatibleExpr(TypesCompatibleExpr *E);
+ void VisitChooseExpr(ChooseExpr *E);
+ void VisitGNUNullExpr(GNUNullExpr *E);
+ void VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ void VisitBlockExpr(BlockExpr *E);
+ void VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
+
+ // Objective-C Expressions
+ void VisitObjCStringLiteral(ObjCStringLiteral *E);
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *E);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *E);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+ void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCSuperExpr(ObjCSuperExpr *E);
+
+ // Objective-C Statements
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *);
+ void VisitObjCAtCatchStmt(ObjCAtCatchStmt *);
+ void VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *);
+ void VisitObjCAtTryStmt(ObjCAtTryStmt *);
+ void VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *);
+ void VisitObjCAtThrowStmt(ObjCAtThrowStmt *);
+ };
+}
+
+void PCHStmtWriter::VisitStmt(Stmt *S) {
+}
+
+void PCHStmtWriter::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getSemiLoc(), Record);
+ Code = pch::STMT_NULL;
+}
+
+void PCHStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->size());
+ for (CompoundStmt::body_iterator CS = S->body_begin(), CSEnd = S->body_end();
+ CS != CSEnd; ++CS)
+ Writer.WriteSubStmt(*CS);
+ Writer.AddSourceLocation(S->getLBracLoc(), Record);
+ Writer.AddSourceLocation(S->getRBracLoc(), Record);
+ Code = pch::STMT_COMPOUND;
+}
+
+void PCHStmtWriter::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
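+ // Record an ID for this case so the enclosing SwitchStmt can reference it
+ // in its case list.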
+ Record.push_back(Writer.RecordSwitchCaseID(S));
+}
+
+void PCHStmtWriter::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ Writer.WriteSubStmt(S->getLHS());
+ Writer.WriteSubStmt(S->getRHS());
+ Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getCaseLoc(), Record);
+ Writer.AddSourceLocation(S->getEllipsisLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Code = pch::STMT_CASE;
+}
+
+void PCHStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getDefaultLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Code = pch::STMT_DEFAULT;
+}
+
+void PCHStmtWriter::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ Writer.AddIdentifierRef(S->getID(), Record);
+ Writer.WriteSubStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getIdentLoc(), Record);
+ Record.push_back(Writer.GetLabelID(S));
+ Code = pch::STMT_LABEL;
+}
+
+void PCHStmtWriter::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getCond());
+ Writer.WriteSubStmt(S->getThen());
+ Writer.WriteSubStmt(S->getElse());
+ Writer.AddSourceLocation(S->getIfLoc(), Record);
+ Writer.AddSourceLocation(S->getElseLoc(), Record);
+ Code = pch::STMT_IF;
+}
+
+void PCHStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getCond());
+ Writer.WriteSubStmt(S->getBody());
+ Writer.AddSourceLocation(S->getSwitchLoc(), Record);
+ for (SwitchCase *SC = S->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase())
+ Record.push_back(Writer.getSwitchCaseID(SC));
+ Code = pch::STMT_SWITCH;
+}
+
+void PCHStmtWriter::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getCond());
+ Writer.WriteSubStmt(S->getBody());
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Code = pch::STMT_WHILE;
+}
+
+void PCHStmtWriter::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getCond());
+ Writer.WriteSubStmt(S->getBody());
+ Writer.AddSourceLocation(S->getDoLoc(), Record);
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Code = pch::STMT_DO;
+}
+
+void PCHStmtWriter::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getInit());
+ Writer.WriteSubStmt(S->getCond());
+ Writer.WriteSubStmt(S->getInc());
+ Writer.WriteSubStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getLParenLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = pch::STMT_FOR;
+}
+
+void PCHStmtWriter::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ Record.push_back(Writer.GetLabelID(S->getLabel()));
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getLabelLoc(), Record);
+ Code = pch::STMT_GOTO;
+}
+
+void PCHStmtWriter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getStarLoc(), Record);
+ Writer.WriteSubStmt(S->getTarget());
+ Code = pch::STMT_INDIRECT_GOTO;
+}
+
+void PCHStmtWriter::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getContinueLoc(), Record);
+ Code = pch::STMT_CONTINUE;
+}
+
+void PCHStmtWriter::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getBreakLoc(), Record);
+ Code = pch::STMT_BREAK;
+}
+
+void PCHStmtWriter::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getRetValue());
+ Writer.AddSourceLocation(S->getReturnLoc(), Record);
+ Code = pch::STMT_RETURN;
+}
+
+void PCHStmtWriter::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getStartLoc(), Record);
+ Writer.AddSourceLocation(S->getEndLoc(), Record);
+ DeclGroupRef DG = S->getDeclGroup();
+ for (DeclGroupRef::iterator D = DG.begin(), DEnd = DG.end(); D != DEnd; ++D)
+ Writer.AddDeclRef(*D, Record);
+ Code = pch::STMT_DECL;
+}
+
+void PCHStmtWriter::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getNumOutputs());
+ Record.push_back(S->getNumInputs());
+ Record.push_back(S->getNumClobbers());
+ Writer.AddSourceLocation(S->getAsmLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Record.push_back(S->isVolatile());
+ Record.push_back(S->isSimple());
+ Writer.WriteSubStmt(S->getAsmString());
+
+ // Outputs
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ Writer.AddString(S->getOutputName(I), Record);
+ Writer.WriteSubStmt(S->getOutputConstraintLiteral(I));
+ Writer.WriteSubStmt(S->getOutputExpr(I));
+ }
+
+ // Inputs
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ Writer.AddString(S->getInputName(I), Record);
+ Writer.WriteSubStmt(S->getInputConstraintLiteral(I));
+ Writer.WriteSubStmt(S->getInputExpr(I));
+ }
+
+ // Clobbers
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ Writer.WriteSubStmt(S->getClobber(I));
+
+ Code = pch::STMT_ASM;
+}
+
+void PCHStmtWriter::VisitExpr(Expr *E) {
+ VisitStmt(E);
+ Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->isTypeDependent());
+ Record.push_back(E->isValueDependent());
+}
+
+void PCHStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->getIdentType()); // FIXME: stable encoding
+ Code = pch::EXPR_PREDEFINED;
+}
+
+void PCHStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = pch::EXPR_DECL_REF;
+}
+
+void PCHStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddAPInt(E->getValue(), Record);
+ Code = pch::EXPR_INTEGER_LITERAL;
+}
+
+void PCHStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ Writer.AddAPFloat(E->getValue(), Record);
+ Record.push_back(E->isExact());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = pch::EXPR_FLOATING_LITERAL;
+}
+
+void PCHStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getSubExpr());
+ Code = pch::EXPR_IMAGINARY_LITERAL;
+}
+
+void PCHStmtWriter::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getByteLength());
+ Record.push_back(E->getNumConcatenated());
+ Record.push_back(E->isWide());
+ // FIXME: String data should be stored as a blob at the end of the
+ // StringLiteral. However, we can't do so now because we have no
+ // provision for coping with abbreviations when we're jumping around
+ // the PCH file during deserialization.
+ Record.insert(Record.end(),
+ E->getStrData(), E->getStrData() + E->getByteLength());
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ Writer.AddSourceLocation(E->getStrTokenLoc(I), Record);
+ Code = pch::EXPR_STRING_LITERAL;
+}
+
+void PCHStmtWriter::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLoc(), Record);
+ Record.push_back(E->isWide());
+ Code = pch::EXPR_CHARACTER_LITERAL;
+}
+
+void PCHStmtWriter::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParen(), Record);
+ Writer.AddSourceLocation(E->getRParen(), Record);
+ Writer.WriteSubStmt(E->getSubExpr());
+ Code = pch::EXPR_PAREN;
+}
+
+void PCHStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getSubExpr());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = pch::EXPR_UNARY_OPERATOR;
+}
+
+void PCHStmtWriter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isSizeOf());
+ if (E->isArgumentType())
+ Writer.AddTypeRef(E->getArgumentType(), Record);
+ else {
+ Record.push_back(0);
+ Writer.WriteSubStmt(E->getArgumentExpr());
+ }
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_SIZEOF_ALIGN_OF;
+}
+
+void PCHStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getLHS());
+ Writer.WriteSubStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = pch::EXPR_ARRAY_SUBSCRIPT;
+}
+
+void PCHStmtWriter::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.WriteSubStmt(E->getCallee());
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.WriteSubStmt(*Arg);
+ Code = pch::EXPR_CALL;
+}
+
+void PCHStmtWriter::VisitMemberExpr(MemberExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getBase());
+ Writer.AddDeclRef(E->getMemberDecl(), Record);
+ Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Record.push_back(E->isArrow());
+ Code = pch::EXPR_MEMBER;
+}
+
+void PCHStmtWriter::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getSubExpr());
+}
+
+void PCHStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getLHS());
+ Writer.WriteSubStmt(E->getRHS());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = pch::EXPR_BINARY_OPERATOR;
+}
+
+void PCHStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ Writer.AddTypeRef(E->getComputationLHSType(), Record);
+ Writer.AddTypeRef(E->getComputationResultType(), Record);
+ Code = pch::EXPR_COMPOUND_ASSIGN_OPERATOR;
+}
+
+void PCHStmtWriter::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getCond());
+ Writer.WriteSubStmt(E->getLHS());
+ Writer.WriteSubStmt(E->getRHS());
+ Code = pch::EXPR_CONDITIONAL_OPERATOR;
+}
+
+void PCHStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+ Record.push_back(E->isLvalueCast());
+ Code = pch::EXPR_IMPLICIT_CAST;
+}
+
+void PCHStmtWriter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ Writer.AddTypeRef(E->getTypeAsWritten(), Record);
+}
+
+void PCHStmtWriter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_CSTYLE_CAST;
+}
+
+void PCHStmtWriter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.WriteSubStmt(E->getInitializer());
+ Record.push_back(E->isFileScope());
+ Code = pch::EXPR_COMPOUND_LITERAL;
+}
+
+void PCHStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getBase());
+ Writer.AddIdentifierRef(&E->getAccessor(), Record);
+ Writer.AddSourceLocation(E->getAccessorLoc(), Record);
+ Code = pch::EXPR_EXT_VECTOR_ELEMENT;
+}
+
+void PCHStmtWriter::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumInits());
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
+ Writer.WriteSubStmt(E->getInit(I));
+ Writer.WriteSubStmt(E->getSyntacticForm());
+ Writer.AddSourceLocation(E->getLBraceLoc(), Record);
+ Writer.AddSourceLocation(E->getRBraceLoc(), Record);
+ Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record);
+ Record.push_back(E->hadArrayRangeDesignator());
+ Code = pch::EXPR_INIT_LIST;
+}
+
+void PCHStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.WriteSubStmt(E->getSubExpr(I));
+ Writer.AddSourceLocation(E->getEqualOrColonLoc(), Record);
+ Record.push_back(E->usesGNUSyntax());
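+ // Each designator is encoded as a kind code followed by its payload: a
+ // FieldDecl reference or field name, or the index of its first
+ // subexpression, plus the relevant source locations.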
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (FieldDecl *Field = D->getField()) {
+ Record.push_back(pch::DESIG_FIELD_DECL);
+ Writer.AddDeclRef(Field, Record);
+ } else {
+ Record.push_back(pch::DESIG_FIELD_NAME);
+ Writer.AddIdentifierRef(D->getFieldName(), Record);
+ }
+ Writer.AddSourceLocation(D->getDotLoc(), Record);
+ Writer.AddSourceLocation(D->getFieldLoc(), Record);
+ } else if (D->isArrayDesignator()) {
+ Record.push_back(pch::DESIG_ARRAY);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ } else {
+ assert(D->isArrayRangeDesignator() && "Unknown designator");
+ Record.push_back(pch::DESIG_ARRAY_RANGE);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getEllipsisLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ }
+ }
+ Code = pch::EXPR_DESIGNATED_INIT;
+}
+
+void PCHStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+ Code = pch::EXPR_IMPLICIT_VALUE_INIT;
+}
+
+void PCHStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getSubExpr());
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_VA_ARG;
+}
+
+void PCHStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getAmpAmpLoc(), Record);
+ Writer.AddSourceLocation(E->getLabelLoc(), Record);
+ Record.push_back(Writer.GetLabelID(E->getLabel()));
+ Code = pch::EXPR_ADDR_LABEL;
+}
+
+void PCHStmtWriter::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getSubStmt());
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_STMT;
+}
+
+void PCHStmtWriter::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeRef(E->getArgType1(), Record);
+ Writer.AddTypeRef(E->getArgType2(), Record);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_TYPES_COMPATIBLE;
+}
+
+void PCHStmtWriter::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getCond());
+ Writer.WriteSubStmt(E->getLHS());
+ Writer.WriteSubStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_CHOOSE;
+}
+
+void PCHStmtWriter::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getTokenLocation(), Record);
+ Code = pch::EXPR_GNU_NULL;
+}
+
+void PCHStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.WriteSubStmt(E->getExpr(I));
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_SHUFFLE_VECTOR;
+}
+
+void PCHStmtWriter::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getBlockDecl(), Record);
+ Record.push_back(E->hasBlockDeclRefExprs());
+ Code = pch::EXPR_BLOCK;
+}
+
+void PCHStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->isByRef());
+ Code = pch::EXPR_BLOCK_DECL_REF;
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void PCHStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ Writer.WriteSubStmt(E->getString());
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Code = pch::EXPR_OBJC_STRING_LITERAL;
+}
+
+void PCHStmtWriter::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeRef(E->getEncodedType(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_OBJC_ENCODE;
+}
+
+void PCHStmtWriter::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_OBJC_SELECTOR_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getProtocol(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = pch::EXPR_OBJC_PROTOCOL_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.WriteSubStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Record.push_back(E->isFreeIvar());
+ Code = pch::EXPR_OBJC_IVAR_REF_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getProperty(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.WriteSubStmt(E->getBase());
+ Code = pch::EXPR_OBJC_PROPERTY_REF_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getGetterMethod(), Record);
+ Writer.AddDeclRef(E->getSetterMethod(), Record);
+
+ // NOTE: ClassProp and Base are mutually exclusive.
+ Writer.AddDeclRef(E->getClassProp(), Record);
+ Writer.WriteSubStmt(E->getBase());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddSourceLocation(E->getClassLoc(), Record);
+ Code = pch::EXPR_OBJC_KVC_REF_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Writer.AddSourceLocation(E->getLeftLoc(), Record);
+ Writer.AddSourceLocation(E->getRightLoc(), Record);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ Writer.AddDeclRef(E->getMethodDecl(), Record); // optional
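+ // A null receiver is emitted as a STMT_NULL_PTR record; in that case this is
+ // a class message and the class info is written below instead.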
+ Writer.WriteSubStmt(E->getReceiver());
+
+ if (!E->getReceiver()) {
+ ObjCMessageExpr::ClassInfo CI = E->getClassInfo();
+ Writer.AddDeclRef(CI.first, Record);
+ Writer.AddIdentifierRef(CI.second, Record);
+ }
+
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.WriteSubStmt(*Arg);
+ Code = pch::EXPR_OBJC_MESSAGE_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCSuperExpr(ObjCSuperExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLoc(), Record);
+ Code = pch::EXPR_OBJC_SUPER_EXPR;
+}
+
+void PCHStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ Writer.WriteSubStmt(S->getElement());
+ Writer.WriteSubStmt(S->getCollection());
+ Writer.WriteSubStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = pch::STMT_OBJC_FOR_COLLECTION;
+}
+
+void PCHStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ Writer.WriteSubStmt(S->getCatchBody());
+ Writer.WriteSubStmt(S->getNextCatchStmt());
+ Writer.AddDeclRef(S->getCatchParamDecl(), Record);
+ Writer.AddSourceLocation(S->getAtCatchLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = pch::STMT_OBJC_CATCH;
+}
+
+void PCHStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ Writer.WriteSubStmt(S->getFinallyBody());
+ Writer.AddSourceLocation(S->getAtFinallyLoc(), Record);
+ Code = pch::STMT_OBJC_FINALLY;
+}
+
+void PCHStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ Writer.WriteSubStmt(S->getTryBody());
+ Writer.WriteSubStmt(S->getCatchStmts());
+ Writer.WriteSubStmt(S->getFinallyStmt());
+ Writer.AddSourceLocation(S->getAtTryLoc(), Record);
+ Code = pch::STMT_OBJC_AT_TRY;
+}
+
+void PCHStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ Writer.WriteSubStmt(S->getSynchExpr());
+ Writer.WriteSubStmt(S->getSynchBody());
+ Writer.AddSourceLocation(S->getAtSynchronizedLoc(), Record);
+ Code = pch::STMT_OBJC_AT_SYNCHRONIZED;
+}
+
+void PCHStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ Writer.WriteSubStmt(S->getThrowExpr());
+ Writer.AddSourceLocation(S->getThrowLoc(), Record);
+ Code = pch::STMT_OBJC_AT_THROW;
+}
+
+//===----------------------------------------------------------------------===//
+// PCHWriter Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned PCHWriter::RecordSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) == SwitchCaseIDs.end() &&
+ "SwitchCase recorded twice");
+ unsigned NextID = SwitchCaseIDs.size();
+ SwitchCaseIDs[S] = NextID;
+ return NextID;
+}
+
+unsigned PCHWriter::getSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) != SwitchCaseIDs.end() &&
+ "SwitchCase hasn't been seen yet");
+ return SwitchCaseIDs[S];
+}
+
+/// \brief Retrieve the ID for the given label statement, which may
+/// or may not have been emitted yet.
+unsigned PCHWriter::GetLabelID(LabelStmt *S) {
+ std::map<LabelStmt *, unsigned>::iterator Pos = LabelIDs.find(S);
+ if (Pos != LabelIDs.end())
+ return Pos->second;
+
+ unsigned NextID = LabelIDs.size();
+ LabelIDs[S] = NextID;
+ return NextID;
+}
+
+/// \brief Write the given substatement or subexpression to the
+/// bitstream.
+void PCHWriter::WriteSubStmt(Stmt *S) {
+ RecordData Record;
+ PCHStmtWriter Writer(*this, Record);
+ ++NumStatements;
+
+ if (!S) {
+ Stream.EmitRecord(pch::STMT_NULL_PTR, Record);
+ return;
+ }
+
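+ // Pre-set Code to the STMT_NULL_PTR sentinel so the assert below catches any
+ // statement kind the visitor fails to handle.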
+ Writer.Code = pch::STMT_NULL_PTR;
+ Writer.Visit(S);
+ assert(Writer.Code != pch::STMT_NULL_PTR &&
+ "Unhandled expression writing PCH file");
+ Stream.EmitRecord(Writer.Code, Record);
+}
+
+/// \brief Flush all of the statements that have been added to the
+/// queue via AddStmt().
+void PCHWriter::FlushStmts() {
+ RecordData Record;
+ PCHStmtWriter Writer(*this, Record);
+
+ for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
+ ++NumStatements;
+ Stmt *S = StmtsToEmit[I];
+
+ if (!S) {
+ Stream.EmitRecord(pch::STMT_NULL_PTR, Record);
+ continue;
+ }
+
+ Writer.Code = pch::STMT_NULL_PTR;
+ Writer.Visit(S);
+ assert(Writer.Code != pch::STMT_NULL_PTR &&
+ "Unhandled expression writing PCH file");
+ Stream.EmitRecord(Writer.Code, Record);
+
+ assert(N == StmtsToEmit.size() &&
+ "Substatement writen via AddStmt rather than WriteSubStmt!");
+
+ // Note that we are at the end of a full expression. Any
+ // expression records that follow this one are part of a different
+ // expression.
+ Record.clear();
+ Stream.EmitRecord(pch::STMT_STOP, Record);
+ }
+
+ StmtsToEmit.clear();
+ SwitchCaseIDs.clear();
+}
diff --git a/lib/Frontend/PlistDiagnostics.cpp b/lib/Frontend/PlistDiagnostics.cpp
new file mode 100644
index 0000000..387ed45
--- /dev/null
+++ b/lib/Frontend/PlistDiagnostics.cpp
@@ -0,0 +1,389 @@
+//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PlistDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Analysis/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/System/Path.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+using llvm::cast;
+
+typedef llvm::DenseMap<FileID, unsigned> FIDMap;
+
+namespace clang {
+ class Preprocessor;
+ class PreprocessorFactory;
+}
+
+namespace {
+ class VISIBILITY_HIDDEN PlistDiagnostics : public PathDiagnosticClient {
+ std::vector<const PathDiagnostic*> BatchedDiags;
+ const std::string OutputFile;
+ const LangOptions &LangOpts;
+ public:
+ PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts);
+ ~PlistDiagnostics();
+ void HandlePathDiagnostic(const PathDiagnostic* D);
+
+ PathGenerationScheme getGenerationScheme() const { return Extensive; }
+ bool supportsLogicalOpControlFlow() const { return true; }
+ bool supportsAllBlockEdges() const { return true; }
+ virtual bool useVerboseDescription() const { return false; }
+ };
+} // end anonymous namespace
+
+PlistDiagnostics::PlistDiagnostics(const std::string& output,
+ const LangOptions &LO)
+ : OutputFile(output), LangOpts(LO) {}
+
+PathDiagnosticClient*
+clang::CreatePlistDiagnosticClient(const std::string& s,
+ Preprocessor *PP, PreprocessorFactory*) {
+ return new PlistDiagnostics(s, PP->getLangOptions());
+}
+
+static void AddFID(FIDMap &FIDs, llvm::SmallVectorImpl<FileID> &V,
+ const SourceManager* SM, SourceLocation L) {
+
+ FileID FID = SM->getFileID(SM->getInstantiationLoc(L));
+ FIDMap::iterator I = FIDs.find(FID);
+ if (I != FIDs.end()) return;
+ FIDs[FID] = V.size();
+ V.push_back(FID);
+}
+
+static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
+ SourceLocation L) {
+ FileID FID = SM.getFileID(SM.getInstantiationLoc(L));
+ FIDMap::const_iterator I = FIDs.find(FID);
+ assert(I != FIDs.end());
+ return I->second;
+}
+
+static llvm::raw_ostream& Indent(llvm::raw_ostream& o, const unsigned indent) {
+ for (unsigned i = 0; i < indent; ++i) o << ' ';
+ return o;
+}
+
+static void EmitLocation(llvm::raw_ostream& o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation L, const FIDMap &FM,
+ unsigned indent, bool extend = false) {
+
+ FullSourceLoc Loc(SM.getInstantiationLoc(L), const_cast<SourceManager&>(SM));
+
+ // Add in the length of the token, so that we cover multi-char tokens.
+ unsigned offset =
+ extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
+
+ Indent(o, indent) << "<dict>\n";
+ Indent(o, indent) << " <key>line</key><integer>"
+ << Loc.getInstantiationLineNumber() << "</integer>\n";
+ Indent(o, indent) << " <key>col</key><integer>"
+ << Loc.getInstantiationColumnNumber() + offset << "</integer>\n";
+ Indent(o, indent) << " <key>file</key><integer>"
+ << GetFID(FM, SM, Loc) << "</integer>\n";
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void EmitLocation(llvm::raw_ostream& o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const PathDiagnosticLocation &L, const FIDMap& FM,
+ unsigned indent, bool extend = false) {
+ EmitLocation(o, SM, LangOpts, L.asLocation(), FM, indent, extend);
+}
+
+static void EmitRange(llvm::raw_ostream& o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ PathDiagnosticRange R, const FIDMap &FM,
+ unsigned indent) {
+ Indent(o, indent) << "<array>\n";
+ EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
+ EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, !R.isPoint);
+ Indent(o, indent) << "</array>\n";
+}
+
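+// Emit a plist <string> element, escaping XML special characters.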
+static llvm::raw_ostream& EmitString(llvm::raw_ostream& o,
+ const std::string& s) {
+ o << "<string>";
+ for (std::string::const_iterator I=s.begin(), E=s.end(); I!=E; ++I) {
+ char c = *I;
+ switch (c) {
+ default: o << c; break;
+ case '&': o << "&amp;"; break;
+ case '<': o << "&lt;"; break;
+ case '>': o << "&gt;"; break;
+ case '\'': o << "&apos;"; break;
+ case '\"': o << "&quot;"; break;
+ }
+ }
+ o << "</string>";
+ return o;
+}
+
+static void ReportControlFlow(llvm::raw_ostream& o,
+ const PathDiagnosticControlFlowPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>control</string>\n";
+
+ // Emit edges.
+ Indent(o, indent) << "<key>edges</key>\n";
+ ++indent;
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (PathDiagnosticControlFlowPiece::const_iterator I=P.begin(), E=P.end();
+ I!=E; ++I) {
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+ Indent(o, indent) << "<key>start</key>\n";
+ EmitRange(o, SM, LangOpts, I->getStart().asRange(), FM, indent+1);
+ Indent(o, indent) << "<key>end</key>\n";
+ EmitRange(o, SM, LangOpts, I->getEnd().asRange(), FM, indent+1);
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+ }
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ --indent;
+
+ // Output any helper text.
+ const std::string& s = P.getString();
+ if (!s.empty()) {
+ Indent(o, indent) << "<key>alternate</key>";
+ EmitString(o, s) << '\n';
+ }
+
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void ReportEvent(llvm::raw_ostream& o, const PathDiagnosticPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>event</string>\n";
+
+ // Output the location.
+ FullSourceLoc L = P.getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, LangOpts, L, FM, indent);
+
+ // Output the ranges (if any).
+ PathDiagnosticPiece::range_iterator RI = P.ranges_begin(),
+ RE = P.ranges_end();
+
+ if (RI != RE) {
+ Indent(o, indent) << "<key>ranges</key>\n";
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (; RI != RE; ++RI)
+ EmitRange(o, SM, LangOpts, *RI, FM, indent+1);
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ }
+
+ // Output the text.
+ assert(!P.getString().empty());
+ Indent(o, indent) << "<key>extended_message</key>\n";
+ Indent(o, indent);
+ EmitString(o, P.getString()) << '\n';
+
+ // Output the short text.
+ // FIXME: Really use a short string.
+ Indent(o, indent) << "<key>message</key>\n";
+ EmitString(o, P.getString()) << '\n';
+
+ // Finish up.
+ --indent;
+ Indent(o, indent); o << "</dict>\n";
+}
+
+static void ReportMacro(llvm::raw_ostream& o,
+ const PathDiagnosticMacroPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ for (PathDiagnosticMacroPiece::const_iterator I=P.begin(), E=P.end();
+ I!=E; ++I) {
+
+ switch ((*I)->getKind()) {
+ default:
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticEventPiece>(**I), FM, SM, LangOpts,
+ indent);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacro(o, cast<PathDiagnosticMacroPiece>(**I), FM, SM, LangOpts,
+ indent);
+ break;
+ }
+ }
+}
+
+static void ReportDiag(llvm::raw_ostream& o, const PathDiagnosticPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+
+ unsigned indent = 4;
+
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+ LangOpts, indent);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticEventPiece>(P), FM, SM, LangOpts,
+ indent);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+ indent);
+ break;
+ }
+}
+
+void PlistDiagnostics::HandlePathDiagnostic(const PathDiagnostic* D) {
+ if (!D)
+ return;
+
+ if (D->empty()) {
+ delete D;
+ return;
+ }
+
+ // We need to flatten the locations (convert Stmt* to locations) because
+ // the referenced statements may be freed by the time the diagnostics
+ // are emitted.
+ const_cast<PathDiagnostic*>(D)->flattenLocations();
+ BatchedDiags.push_back(D);
+}
+
+PlistDiagnostics::~PlistDiagnostics() {
+
+ // Build up a set of FIDs that we use by scanning the locations and
+ // ranges of the diagnostics.
+ FIDMap FM;
+ llvm::SmallVector<FileID, 10> Fids;
+ const SourceManager* SM = 0;
+
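+ // All of the diagnostics are assumed to come from a single translation unit,
+ // so take the SourceManager from the first one.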
+ if (!BatchedDiags.empty())
+ SM = &(*BatchedDiags.begin())->begin()->getLocation().getManager();
+
+ for (std::vector<const PathDiagnostic*>::iterator DI = BatchedDiags.begin(),
+ DE = BatchedDiags.end(); DI != DE; ++DI) {
+
+ const PathDiagnostic *D = *DI;
+
+ for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I!=E; ++I) {
+ AddFID(FM, Fids, SM, I->getLocation().asLocation());
+
+ for (PathDiagnosticPiece::range_iterator RI=I->ranges_begin(),
+ RE=I->ranges_end(); RI!=RE; ++RI) {
+ AddFID(FM, Fids, SM, RI->getBegin());
+ AddFID(FM, Fids, SM, RI->getEnd());
+ }
+ }
+ }
+
+ // Open the file.
+ std::string ErrMsg;
+ llvm::raw_fd_ostream o(OutputFile.c_str(), false, ErrMsg);
+ if (!ErrMsg.empty()) {
+ llvm::errs() << "warning: could not creat file: " << OutputFile << '\n';
+ return;
+ }
+
+ // Write the plist header.
+ o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
+ "http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
+ "<plist version=\"1.0\">\n";
+
+ // Write the root object: a <dict> containing...
+ // - "files", an <array> mapping from FIDs to file names
+ // - "diagnostics", an <array> containing the path diagnostics
+ o << "<dict>\n"
+ " <key>files</key>\n"
+ " <array>\n";
+
+ for (llvm::SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
+ I!=E; ++I) {
+ o << " ";
+ EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n';
+ }
+
+ o << " </array>\n"
+ " <key>diagnostics</key>\n"
+ " <array>\n";
+
+ for (std::vector<const PathDiagnostic*>::iterator DI=BatchedDiags.begin(),
+ DE = BatchedDiags.end(); DI!=DE; ++DI) {
+
+ o << " <dict>\n"
+ " <key>path</key>\n";
+
+ const PathDiagnostic *D = *DI;
+ // Create an owning smart pointer for 'D' just so that we auto-free it
+ // when we exit this method.
+ llvm::OwningPtr<PathDiagnostic> OwnedD(const_cast<PathDiagnostic*>(D));
+
+ o << " <array>\n";
+
+ for (PathDiagnostic::const_iterator I=D->begin(), E=D->end(); I != E; ++I)
+ ReportDiag(o, *I, FM, *SM, LangOpts);
+
+ o << " </array>\n";
+
+ // Output the bug type and bug category.
+ o << " <key>description</key>";
+ EmitString(o, D->getDescription()) << '\n';
+ o << " <key>category</key>";
+ EmitString(o, D->getCategory()) << '\n';
+ o << " <key>type</key>";
+ EmitString(o, D->getBugType()) << '\n';
+
+ // Output the location of the bug.
+ o << " <key>location</key>\n";
+ EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2);
+
+ // Close up the entry.
+ o << " </dict>\n";
+ }
+
+ o << " </array>\n";
+
+ // Finish.
+ o << "</dict>\n</plist>";
+}
diff --git a/lib/Frontend/PrintParserCallbacks.cpp b/lib/Frontend/PrintParserCallbacks.cpp
new file mode 100644
index 0000000..f02d5d4
--- /dev/null
+++ b/lib/Frontend/PrintParserCallbacks.cpp
@@ -0,0 +1,831 @@
+//===--- PrintParserCallbacks.cpp - Implement -parse-print-callbacks mode -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code prints the name of each parser callback as it is invoked. This is
+// the behavior of the -parse-print-callbacks option.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Parse/Action.h"
+#include "clang/Parse/DeclSpec.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class ParserPrintActions : public MinimalAction {
+ llvm::raw_ostream& Out;
+
+ public:
+ ParserPrintActions(Preprocessor &PP, llvm::raw_ostream& OS)
+ : MinimalAction(PP), Out(OS) {}
+
+ // Printing Functions which also must call MinimalAction
+
+ /// ActOnDeclarator - This callback is invoked when a declarator is parsed.
+ /// Any initializer is attached afterwards via AddInitializerToDecl. This is
+ /// for things like: "int X = 4" or "typedef int foo".
+ virtual DeclPtrTy ActOnDeclarator(Scope *S, Declarator &D) {
+ Out << __FUNCTION__ << " ";
+ if (IdentifierInfo *II = D.getIdentifier()) {
+ Out << "'" << II->getName() << "'";
+ } else {
+ Out << "<anon>";
+ }
+ Out << "\n";
+
+ // Pass up to EmptyActions so that the symbol table is maintained right.
+ return MinimalAction::ActOnDeclarator(S, D);
+ }
+ /// ActOnPopScope - This callback is called immediately before the specified
+ /// scope is popped and deleted.
+ virtual void ActOnPopScope(SourceLocation Loc, Scope *S) {
+ Out << __FUNCTION__ << "\n";
+ return MinimalAction::ActOnPopScope(Loc, S);
+ }
+
+ /// ActOnTranslationUnitScope - This callback is called once, immediately
+ /// after creating the translation unit scope (in Parser::Initialize).
+ virtual void ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) {
+ Out << __FUNCTION__ << "\n";
+ MinimalAction::ActOnTranslationUnitScope(Loc, S);
+ }
+
+
+ Action::DeclPtrTy ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtocols,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList) {
+ Out << __FUNCTION__ << "\n";
+ return MinimalAction::ActOnStartClassInterface(AtInterfaceLoc,
+ ClassName, ClassLoc,
+ SuperName, SuperLoc,
+ ProtoRefs, NumProtocols,
+ EndProtoLoc, AttrList);
+ }
+
+ /// ActOnForwardClassDeclaration -
+ /// Scope will always be top level file scope.
+ Action::DeclPtrTy ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
+ IdentifierInfo **IdentList,
+ unsigned NumElts) {
+ Out << __FUNCTION__ << "\n";
+ return MinimalAction::ActOnForwardClassDeclaration(AtClassLoc, IdentList,
+ NumElts);
+ }
+
+ // Pure Printing
+
+ /// ActOnParamDeclarator - This callback is invoked when a parameter
+ /// declarator is parsed. This callback only occurs for functions
+ /// with prototypes. S is the function prototype scope for the
+ /// parameters (C++ [basic.scope.proto]).
+ virtual DeclPtrTy ActOnParamDeclarator(Scope *S, Declarator &D) {
+ Out << __FUNCTION__ << " ";
+ if (IdentifierInfo *II = D.getIdentifier()) {
+ Out << "'" << II->getName() << "'";
+ } else {
+ Out << "<anon>";
+ }
+ Out << "\n";
+ return DeclPtrTy();
+ }
+
+ /// AddInitializerToDecl - This action is called immediately after
+ /// ParseDeclarator (when an initializer is present). The code is factored
+ /// this way to make sure we are able to handle the following:
+ /// void func() { int xx = xx; }
+ /// This allows ActOnDeclarator to register "xx" prior to parsing the
+ /// initializer. The declaration above should still result in a warning,
+ /// since the reference to "xx" is uninitialized.
+ virtual void AddInitializerToDecl(DeclPtrTy Dcl, FullExprArg Init) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ /// FinalizeDeclaratorGroup - After a sequence of declarators are parsed,
+ /// this gives the actions implementation a chance to process the group as
+ /// a whole.
+ virtual DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec& DS,
+ DeclPtrTy *Group,
+ unsigned NumDecls) {
+ Out << __FUNCTION__ << "\n";
+ return DeclGroupPtrTy();
+ }
+
+ /// ActOnStartOfFunctionDef - This is called at the start of a function
+ /// definition, instead of calling ActOnDeclarator. The Declarator includes
+ /// information about formal arguments that are part of this function.
+ virtual DeclPtrTy ActOnStartOfFunctionDef(Scope *FnBodyScope,
+ Declarator &D){
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ /// ActOnStartOfFunctionDef - This is called at the start of a function
+ /// definition, after the FunctionDecl has already been created.
+ virtual DeclPtrTy ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual void ActOnStartOfObjCMethodDef(Scope *FnBodyScope, DeclPtrTy D) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ /// ActOnFinishFunctionBody - This is called when a function body has completed
+ /// parsing. Decl is the DeclPtrTy returned by ActOnStartOfFunctionDef.
+ virtual DeclPtrTy ActOnFinishFunctionBody(DeclPtrTy Decl, StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual DeclPtrTy ActOnFileScopeAsmDecl(SourceLocation Loc,
+ ExprArg AsmString) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ /// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+ /// no declarator (e.g. "struct foo;") is parsed.
+ virtual DeclPtrTy ParsedFreeStandingDeclSpec(Scope *S, DeclSpec &DS) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ /// ActOnLinkageSpec - Parsed a C++ linkage-specification that
+ /// contained braces. Lang/StrSize contains the language string that
+ /// was parsed at location Loc. Decls/NumDecls provides the
+ /// declarations parsed inside the linkage specification.
+ virtual DeclPtrTy ActOnLinkageSpec(SourceLocation Loc,
+ SourceLocation LBrace,
+ SourceLocation RBrace, const char *Lang,
+ unsigned StrSize,
+ DeclPtrTy *Decls, unsigned NumDecls) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ /// ActOnLinkageSpec - Parsed a C++ linkage-specification without
+ /// braces. Lang/StrSize contains the language string that was
+ /// parsed at location Loc. D is the declaration parsed.
+ virtual DeclPtrTy ActOnLinkageSpec(SourceLocation Loc, const char *Lang,
+ unsigned StrSize, DeclPtrTy D) {
+ return DeclPtrTy();
+ }
+
+ //===------------------------------------------------------------------===//
+ // Type Parsing Callbacks.
+ //===------------------------------------------------------------------===//
+
+ virtual TypeResult ActOnTypeName(Scope *S, Declarator &D) {
+ Out << __FUNCTION__ << "\n";
+ return TypeResult();
+ }
+
+ virtual DeclPtrTy ActOnTag(Scope *S, unsigned TagType, TagKind TK,
+ SourceLocation KWLoc, const CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ bool &Owned) {
+ // TagType is an instance of DeclSpec::TST, indicating what kind of tag this
+ // is (struct/union/enum/class).
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ /// Act on @defs() element found when parsing a structure. ClassName is the
+ /// name of the referenced class.
+ virtual void ActOnDefs(Scope *S, DeclPtrTy TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ llvm::SmallVectorImpl<DeclPtrTy> &Decls) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual DeclPtrTy ActOnField(Scope *S, DeclPtrTy TagD,
+ SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual DeclPtrTy ActOnIvar(Scope *S, SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth,
+ tok::ObjCKeywordKind visibility) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual void ActOnFields(Scope* S, SourceLocation RecLoc, DeclPtrTy TagDecl,
+ DeclPtrTy *Fields, unsigned NumFields,
+ SourceLocation LBrac, SourceLocation RBrac,
+ AttributeList *AttrList) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual DeclPtrTy ActOnEnumConstant(Scope *S, DeclPtrTy EnumDecl,
+ DeclPtrTy LastEnumConstant,
+ SourceLocation IdLoc,IdentifierInfo *Id,
+ SourceLocation EqualLoc, ExprTy *Val) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, DeclPtrTy EnumDecl,
+ DeclPtrTy *Elements, unsigned NumElements) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ //===------------------------------------------------------------------===//
+ // Statement Parsing Callbacks.
+ //===------------------------------------------------------------------===//
+
+ virtual OwningStmtResult ActOnNullStmt(SourceLocation SemiLoc) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnCompoundStmt(SourceLocation L,
+ SourceLocation R,
+ MultiStmtArg Elts,
+ bool isStmtExpr) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnExprStmt(FullExprArg Expr) {
+ Out << __FUNCTION__ << "\n";
+ return OwningStmtResult(*this, Expr->release());
+ }
+
+ /// ActOnCaseStmt - Note that this handles the GNU 'case 1 ... 4' extension,
+ /// which can specify an RHS value.
+ virtual OwningStmtResult ActOnCaseStmt(SourceLocation CaseLoc,
+ ExprArg LHSVal,
+ SourceLocation DotDotDotLoc,
+ ExprArg RHSVal,
+ SourceLocation ColonLoc) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
+ SourceLocation ColonLoc,
+ StmtArg SubStmt, Scope *CurScope){
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnLabelStmt(SourceLocation IdentLoc,
+ IdentifierInfo *II,
+ SourceLocation ColonLoc,
+ StmtArg SubStmt) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnIfStmt(SourceLocation IfLoc,
+ FullExprArg CondVal, StmtArg ThenVal,
+ SourceLocation ElseLoc,
+ StmtArg ElseVal) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnStartOfSwitchStmt(ExprArg Cond) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
+ StmtArg Switch,
+ StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnWhileStmt(SourceLocation WhileLoc,
+ FullExprArg Cond, StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnDoStmt(SourceLocation DoLoc, StmtArg Body,
+ SourceLocation WhileLoc, ExprArg Cond){
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnForStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ StmtArg First, ExprArg Second,
+ ExprArg Third, SourceLocation RParenLoc,
+ StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnObjCForCollectionStmt(
+ SourceLocation ForColLoc,
+ SourceLocation LParenLoc,
+ StmtArg First, ExprArg Second,
+ SourceLocation RParenLoc, StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ IdentifierInfo *LabelII) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
+ SourceLocation StarLoc,
+ ExprArg DestExp) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnContinueStmt(SourceLocation ContinueLoc,
+ Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnBreakStmt(SourceLocation GotoLoc,
+ Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnReturnStmt(SourceLocation ReturnLoc,
+ FullExprArg RetValExp) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+ virtual OwningStmtResult ActOnAsmStmt(SourceLocation AsmLoc,
+ bool IsSimple,
+ bool IsVolatile,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ std::string *Names,
+ MultiExprArg Constraints,
+ MultiExprArg Exprs,
+ ExprArg AsmString,
+ MultiExprArg Clobbers,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ // Objective-c statements
+ virtual OwningStmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen,
+ DeclPtrTy Parm, StmtArg Body,
+ StmtArg CatchList) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc,
+ StmtArg Body) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc,
+ StmtArg Try, StmtArg Catch,
+ StmtArg Finally) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc,
+ ExprArg Throw,
+ Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ ExprArg SynchExpr,
+ StmtArg SynchBody) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ // C++ Statements
+ virtual DeclPtrTy ActOnExceptionDeclarator(Scope *S, Declarator &D) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual OwningStmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
+ DeclPtrTy ExceptionDecl,
+ StmtArg HandlerBlock) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ virtual OwningStmtResult ActOnCXXTryBlock(SourceLocation TryLoc,
+ StmtArg TryBlock,
+ MultiStmtArg Handlers) {
+ Out << __FUNCTION__ << "\n";
+ return StmtEmpty();
+ }
+
+ //===------------------------------------------------------------------===//
+ // Expression Parsing Callbacks.
+ //===------------------------------------------------------------------===//
+
+ // Primary Expressions.
+
+ /// ActOnIdentifierExpr - Parse an identifier in expression context.
+ /// 'HasTrailingLParen' indicates whether or not the identifier has a '('
+ /// token immediately after it.
+ virtual OwningExprResult ActOnIdentifierExpr(Scope *S, SourceLocation Loc,
+ IdentifierInfo &II,
+ bool HasTrailingLParen,
+ const CXXScopeSpec *SS,
+ bool isAddressOfOperand) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXOperatorFunctionIdExpr(
+ Scope *S, SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ bool HasTrailingLParen, const CXXScopeSpec &SS,
+ bool isAddressOfOperand) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXConversionFunctionExpr(
+ Scope *S, SourceLocation OperatorLoc,
+ TypeTy *Type, bool HasTrailingLParen,
+ const CXXScopeSpec &SS,bool isAddressOfOperand) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnPredefinedExpr(SourceLocation Loc,
+ tok::TokenKind Kind) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCharacterConstant(const Token &) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnNumericConstant(const Token &) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ /// ActOnStringLiteral - The specified tokens were lexed as pasted string
+ /// fragments (e.g. "foo" "bar" L"baz").
+ virtual OwningExprResult ActOnStringLiteral(const Token *Toks,
+ unsigned NumToks) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnParenExpr(SourceLocation L, SourceLocation R,
+ ExprArg Val) {
+ Out << __FUNCTION__ << "\n";
+ return move(Val); // Default impl returns operand.
+ }
+
+ // Postfix Expressions.
+ virtual OwningExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ ExprArg Input) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ virtual OwningExprResult ActOnArraySubscriptExpr(Scope *S, ExprArg Base,
+ SourceLocation LLoc,
+ ExprArg Idx,
+ SourceLocation RLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ virtual OwningExprResult ActOnMemberReferenceExpr(Scope *S, ExprArg Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation MemberLoc,
+ IdentifierInfo &Member,
+ DeclPtrTy ImplDecl) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCallExpr(Scope *S, ExprArg Fn,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+  // Unary Operators.  'Op' is the token kind of the operator.
+ virtual OwningExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, ExprArg Input) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ virtual OwningExprResult
+ ActOnSizeOfAlignOfExpr(SourceLocation OpLoc, bool isSizeof, bool isType,
+ void *TyOrEx, const SourceRange &ArgRange) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCompoundLiteral(SourceLocation LParen,
+ TypeTy *Ty,
+ SourceLocation RParen,
+ ExprArg Op) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ virtual OwningExprResult ActOnInitList(SourceLocation LParenLoc,
+ MultiExprArg InitList,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ virtual OwningExprResult ActOnCastExpr(SourceLocation LParenLoc, TypeTy *Ty,
+ SourceLocation RParenLoc,ExprArg Op){
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind,
+ ExprArg LHS, ExprArg RHS) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+  /// in the case of the GNU conditional expr extension.
+ virtual OwningExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ ExprArg Cond, ExprArg LHS,
+ ExprArg RHS) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ //===--------------------- GNU Extension Expressions ------------------===//
+
+ virtual OwningExprResult ActOnAddrLabel(SourceLocation OpLoc,
+ SourceLocation LabLoc,
+ IdentifierInfo *LabelII) {// "&&foo"
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnStmtExpr(SourceLocation LPLoc,
+ StmtArg SubStmt,
+ SourceLocation RPLoc) { // "({..})"
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ TypeTy *Arg1,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ // __builtin_types_compatible_p(type1, type2)
+ virtual OwningExprResult ActOnTypesCompatibleExpr(SourceLocation BuiltinLoc,
+ TypeTy *arg1,TypeTy *arg2,
+ SourceLocation RPLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ // __builtin_choose_expr(constExpr, expr1, expr2)
+ virtual OwningExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
+ ExprArg cond, ExprArg expr1,
+ ExprArg expr2,
+ SourceLocation RPLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ // __builtin_va_arg(expr, type)
+ virtual OwningExprResult ActOnVAArg(SourceLocation BuiltinLoc,
+ ExprArg expr, TypeTy *type,
+ SourceLocation RPLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnGNUNullExpr(SourceLocation TokenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual OwningExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc,
+ StmtArg Body,
+ Scope *CurScope) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual DeclPtrTy ActOnStartNamespaceDef(Scope *S, SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ SourceLocation LBrace) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual void ActOnFinishNamespaceDef(DeclPtrTy Dcl, SourceLocation RBrace) {
+ Out << __FUNCTION__ << "\n";
+ return;
+ }
+
+#if 0
+ // FIXME: AttrList should be deleted by this function, but the definition
+ // would have to be available.
+ virtual DeclPtrTy ActOnUsingDirective(Scope *CurScope,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+#endif
+
+ virtual void ActOnParamDefaultArgument(DeclPtrTy param,
+ SourceLocation EqualLoc,
+ ExprArg defarg) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnParamUnparsedDefaultArgument(DeclPtrTy param,
+ SourceLocation EqualLoc) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnParamDefaultArgumentError(DeclPtrTy param) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void AddCXXDirectInitializerToDecl(DeclPtrTy Dcl,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return;
+ }
+
+ virtual void ActOnStartDelayedCXXMethodDeclaration(Scope *S,
+ DeclPtrTy Method)
+ {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnDelayedCXXMethodParameter(Scope *S, DeclPtrTy Param) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual void ActOnFinishDelayedCXXMethodDeclaration(Scope *S,
+ DeclPtrTy Method) {
+ Out << __FUNCTION__ << "\n";
+ }
+
+ virtual DeclPtrTy ActOnStaticAssertDeclaration(SourceLocation AssertLoc,
+ ExprArg AssertExpr,
+ ExprArg AssertMessageExpr) {
+ Out << __FUNCTION__ << "\n";
+ return DeclPtrTy();
+ }
+
+ virtual OwningExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc,
+ TypeTy *Ty,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc,
+ ExprArg Op,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXTypeid(SourceLocation OpLoc,
+ SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXThis(SourceLocation ThisLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc,
+ tok::TokenKind Kind) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXThrow(SourceLocation OpLoc, ExprArg Op) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXTypeConstructExpr(SourceRange TypeRange,
+ TypeTy *TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXConditionDeclarationExpr(Scope *S,
+ SourceLocation StartLoc,
+ Declarator &D,
+ SourceLocation EqualLoc,
+ ExprArg AssignExprVal) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXNew(SourceLocation StartLoc,
+ bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ bool ParenTypeId, Declarator &D,
+ SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnCXXDelete(SourceLocation StartLoc,
+ bool UseGlobal, bool ArrayForm,
+ ExprArg Operand) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+
+ virtual OwningExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ SourceLocation LParen,
+ TypeTy *Ty,
+ SourceLocation RParen) {
+ Out << __FUNCTION__ << "\n";
+ return ExprEmpty();
+ }
+ };
+}
+
+MinimalAction *clang::CreatePrintParserActionsAction(Preprocessor &PP,
+ llvm::raw_ostream* OS) {
+ return new ParserPrintActions(PP, *OS);
+}
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
new file mode 100644
index 0000000..89d099c
--- /dev/null
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -0,0 +1,470 @@
+//===--- PrintPreprocessedOutput.cpp - Implement the -E mode --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code simply runs the preprocessor on the input file and prints out the
+// result. This is the traditional behavior of the -E option.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+
+/// PrintMacroDefinition - Print a macro definition in a form that will be
+/// properly accepted back as a definition.
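+///
+/// For example, a function-like macro definition is printed back roughly as:
+///   #define MAX(a,b) ((a) > (b) ? (a) : (b))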
+static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
+ Preprocessor &PP, llvm::raw_ostream &OS) {
+ OS << "#define " << II.getName();
+
+ if (MI.isFunctionLike()) {
+ OS << '(';
+ if (MI.arg_empty())
+ ;
+ else if (MI.getNumArgs() == 1)
+ OS << (*MI.arg_begin())->getName();
+ else {
+ MacroInfo::arg_iterator AI = MI.arg_begin(), E = MI.arg_end();
+ OS << (*AI++)->getName();
+ while (AI != E)
+ OS << ',' << (*AI++)->getName();
+ }
+
+ if (MI.isVariadic()) {
+ if (!MI.arg_empty())
+ OS << ',';
+ OS << "...";
+ }
+ OS << ')';
+ }
+
+  // GCC always emits a space, even if the macro body is empty.  However, we
+  // do not want to emit two spaces if the first token has a leading space.
+ if (MI.tokens_empty() || !MI.tokens_begin()->hasLeadingSpace())
+ OS << ' ';
+
+ llvm::SmallVector<char, 128> SpellingBuffer;
+ for (MacroInfo::tokens_iterator I = MI.tokens_begin(), E = MI.tokens_end();
+ I != E; ++I) {
+ if (I->hasLeadingSpace())
+ OS << ' ';
+
+ // Make sure we have enough space in the spelling buffer.
+    if (I->getLength() > SpellingBuffer.size())
+      SpellingBuffer.resize(I->getLength());
+ const char *Buffer = SpellingBuffer.data();
+ unsigned SpellingLen = PP.getSpelling(*I, Buffer);
+ OS.write(Buffer, SpellingLen);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessed token printer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PrintPPOutputPPCallbacks : public PPCallbacks {
+ Preprocessor &PP;
+ TokenConcatenation ConcatInfo;
+public:
+ llvm::raw_ostream &OS;
+private:
+ unsigned CurLine;
+ bool EmittedTokensOnThisLine;
+ bool EmittedMacroOnThisLine;
+ SrcMgr::CharacteristicKind FileType;
+ llvm::SmallString<512> CurFilename;
+ bool Initialized;
+ bool DisableLineMarkers;
+ bool DumpDefines;
+public:
+ PrintPPOutputPPCallbacks(Preprocessor &pp, llvm::raw_ostream &os,
+ bool lineMarkers, bool defines)
+ : PP(pp), ConcatInfo(PP), OS(os), DisableLineMarkers(lineMarkers),
+ DumpDefines(defines) {
+ CurLine = 0;
+ CurFilename += "<uninit>";
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ FileType = SrcMgr::C_User;
+ Initialized = false;
+ }
+
+ void SetEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
+ bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType);
+ virtual void Ident(SourceLocation Loc, const std::string &str);
+ virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
+ const std::string &Str);
+
+
+ bool HandleFirstTokOnLine(Token &Tok);
+ bool MoveToLine(SourceLocation Loc);
+ bool AvoidConcat(const Token &PrevTok, const Token &Tok) {
+ return ConcatInfo.AvoidConcat(PrevTok, Tok);
+ }
+ void WriteLineInfo(unsigned LineNo, const char *Extra=0, unsigned ExtraLen=0);
+
+ /// MacroDefined - This hook is called whenever a macro definition is seen.
+ void MacroDefined(const IdentifierInfo *II, const MacroInfo *MI);
+
+};
+} // end anonymous namespace
+
+void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
+ const char *Extra,
+ unsigned ExtraLen) {
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ }
+
+ OS << '#' << ' ' << LineNo << ' ' << '"';
+ OS.write(&CurFilename[0], CurFilename.size());
+ OS << '"';
+
+ if (ExtraLen)
+ OS.write(Extra, ExtraLen);
+
+ if (FileType == SrcMgr::C_System)
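+  // GCC line-marker flags: "3" marks the file as a system header, and "3 4"
+  // additionally marks it as implicit extern "C".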
+ OS.write(" 3", 2);
+ else if (FileType == SrcMgr::C_ExternCSystem)
+ OS.write(" 3 4", 4);
+ OS << '\n';
+}
+
+/// MoveToLine - Move the output to the source line specified by the location
+/// object.  We can do this by emitting some number of \n's, or by emitting a
+/// #line directive. This returns false if already at the specified line, true
+/// if some newlines were emitted.
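+///
+/// For example, if CurLine is 5 and Loc is on line 7, we just emit two '\n's;
+/// if Loc is on line 50, we emit a "# 50 ..." line marker instead.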
+bool PrintPPOutputPPCallbacks::MoveToLine(SourceLocation Loc) {
+ unsigned LineNo = PP.getSourceManager().getInstantiationLineNumber(Loc);
+
+ if (DisableLineMarkers) {
+ if (LineNo == CurLine) return false;
+
+ CurLine = LineNo;
+
+ if (!EmittedTokensOnThisLine && !EmittedMacroOnThisLine)
+ return true;
+
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ return true;
+ }
+
+ // If this line is "close enough" to the original line, just print newlines,
+ // otherwise print a #line directive.
+ if (LineNo-CurLine <= 8) {
+ if (LineNo-CurLine == 1)
+ OS << '\n';
+ else if (LineNo == CurLine)
+ return false; // Spelling line moved, but instantiation line didn't.
+ else {
+ const char *NewLines = "\n\n\n\n\n\n\n\n";
+ OS.write(NewLines, LineNo-CurLine);
+ }
+ } else {
+ WriteLineInfo(LineNo, 0, 0);
+ }
+
+ CurLine = LineNo;
+ return true;
+}
+
+
+/// FileChanged - Whenever the preprocessor enters or exits a #include file
+/// it invokes this handler. Update our conception of the current source
+/// position.
+void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType) {
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ SourceManager &SourceMgr = PP.getSourceManager();
+ if (Reason == PPCallbacks::EnterFile) {
+ SourceLocation IncludeLoc = SourceMgr.getPresumedLoc(Loc).getIncludeLoc();
+ if (IncludeLoc.isValid())
+ MoveToLine(IncludeLoc);
+ } else if (Reason == PPCallbacks::SystemHeaderPragma) {
+ MoveToLine(Loc);
+
+    // TODO: GCC emits the line marker for this pragma on the line AFTER the
+    // directive and emits a bunch of spaces that aren't needed.  Emulate this
+    // strange behavior.
+ }
+
+ Loc = SourceMgr.getInstantiationLoc(Loc);
+ // FIXME: Should use presumed line #!
+ CurLine = SourceMgr.getInstantiationLineNumber(Loc);
+
+ if (DisableLineMarkers) return;
+
+ CurFilename.clear();
+ CurFilename += SourceMgr.getPresumedLoc(Loc).getFilename();
+ Lexer::Stringify(CurFilename);
+ FileType = NewFileType;
+
+ if (!Initialized) {
+ WriteLineInfo(CurLine);
+ Initialized = true;
+ }
+
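+  // Line-marker flag "1" means we are entering a new file, and "2" means we
+  // are returning to a file after finishing an #include.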
+ switch (Reason) {
+ case PPCallbacks::EnterFile:
+ WriteLineInfo(CurLine, " 1", 2);
+ break;
+ case PPCallbacks::ExitFile:
+ WriteLineInfo(CurLine, " 2", 2);
+ break;
+ case PPCallbacks::SystemHeaderPragma:
+ case PPCallbacks::RenameFile:
+ WriteLineInfo(CurLine);
+ break;
+ }
+}
+
+/// Ident - Handle #ident directives when read by the preprocessor.
+///
+void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, const std::string &S) {
+ MoveToLine(Loc);
+
+ OS.write("#ident ", strlen("#ident "));
+ OS.write(&S[0], S.size());
+ EmittedTokensOnThisLine = true;
+}
+
+/// MacroDefined - This hook is called whenever a macro definition is seen.
+void PrintPPOutputPPCallbacks::MacroDefined(const IdentifierInfo *II,
+ const MacroInfo *MI) {
+ // Only print out macro definitions in -dD mode.
+ if (!DumpDefines ||
+ // Ignore __FILE__ etc.
+ MI->isBuiltinMacro()) return;
+
+ MoveToLine(MI->getDefinitionLoc());
+ PrintMacroDefinition(*II, *MI, PP, OS);
+ EmittedMacroOnThisLine = true;
+}
+
+
+void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
+ const IdentifierInfo *Kind,
+ const std::string &Str) {
+ MoveToLine(Loc);
+ OS << "#pragma comment(" << Kind->getName();
+
+ if (!Str.empty()) {
+ OS << ", \"";
+
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char Char = Str[i];
+ if (isprint(Char) && Char != '\\' && Char != '"')
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ }
+ OS << '"';
+ }
+
+ OS << ')';
+ EmittedTokensOnThisLine = true;
+}
+
+
+/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
+/// is called for the first token on each new line. If this really is the start
+/// of a new logical line, handle it and return true, otherwise return false.
+/// This may not be the start of a logical line because the "start of line"
+/// marker is set for spelling lines, not instantiation ones.
+bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ if (!MoveToLine(Tok.getLocation()))
+ return false;
+
+ // Print out space characters so that the first token on a line is
+ // indented for easy reading.
+ const SourceManager &SourceMgr = PP.getSourceManager();
+ unsigned ColNo = SourceMgr.getInstantiationColumnNumber(Tok.getLocation());
+
+ // This hack prevents stuff like:
+ // #define HASH #
+ // HASH define foo bar
+ // From having the # character end up at column 1, which makes it so it
+ // is not handled as a #define next time through the preprocessor if in
+ // -fpreprocessed mode.
+ if (ColNo <= 1 && Tok.is(tok::hash))
+ OS << ' ';
+
+ // Otherwise, indent the appropriate number of spaces.
+ for (; ColNo > 1; --ColNo)
+ OS << ' ';
+
+ return true;
+}
+
+namespace {
+struct UnknownPragmaHandler : public PragmaHandler {
+ const char *Prefix;
+ PrintPPOutputPPCallbacks *Callbacks;
+
+ UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
+ : PragmaHandler(0), Prefix(prefix), Callbacks(callbacks) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ Callbacks->MoveToLine(PragmaTok.getLocation());
+ Callbacks->OS.write(Prefix, strlen(Prefix));
+
+ // Read and print all of the pragma tokens.
+ while (PragmaTok.isNot(tok::eom)) {
+ if (PragmaTok.hasLeadingSpace())
+ Callbacks->OS << ' ';
+ std::string TokSpell = PP.getSpelling(PragmaTok);
+ Callbacks->OS.write(&TokSpell[0], TokSpell.size());
+ PP.LexUnexpandedToken(PragmaTok);
+ }
+ Callbacks->OS << '\n';
+ }
+};
+} // end anonymous namespace
+
+
+static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
+ PrintPPOutputPPCallbacks *Callbacks,
+ llvm::raw_ostream &OS) {
+ char Buffer[256];
+ Token PrevTok;
+ while (1) {
+
+ // If this token is at the start of a line, emit newlines if needed.
+ if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
+ // done.
+ } else if (Tok.hasLeadingSpace() ||
+ // If we haven't emitted a token on this line yet, PrevTok isn't
+ // useful to look at and no concatenation could happen anyway.
+ (Callbacks->hasEmittedTokensOnThisLine() &&
+ // Don't print "-" next to "-", it would form "--".
+ Callbacks->AvoidConcat(PrevTok, Tok))) {
+ OS << ' ';
+ }
+
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ OS.write(II->getName(), II->getLength());
+ } else if (Tok.isLiteral() && !Tok.needsCleaning() &&
+ Tok.getLiteralData()) {
+ OS.write(Tok.getLiteralData(), Tok.getLength());
+ } else if (Tok.getLength() < 256) {
+ const char *TokPtr = Buffer;
+ unsigned Len = PP.getSpelling(Tok, TokPtr);
+ OS.write(TokPtr, Len);
+ } else {
+ std::string S = PP.getSpelling(Tok);
+ OS.write(&S[0], S.size());
+ }
+ Callbacks->SetEmittedTokensOnThisLine();
+
+ if (Tok.is(tok::eof)) break;
+
+ PrevTok = Tok;
+ PP.Lex(Tok);
+ }
+}
+
+namespace {
+ struct SortMacrosByID {
+ typedef std::pair<IdentifierInfo*, MacroInfo*> id_macro_pair;
+ bool operator()(const id_macro_pair &LHS, const id_macro_pair &RHS) const {
+ return strcmp(LHS.first->getName(), RHS.first->getName()) < 0;
+ }
+ };
+}
+
+void clang::DoPrintMacros(Preprocessor &PP, llvm::raw_ostream *OS) {
+ // -dM mode just scans and ignores all tokens in the files, then dumps out
+ // the macro table at the end.
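+  //
+  // The output is one "#define ..." line per macro (builtins such as __LINE__
+  // are skipped), sorted by macro name, e.g.:
+  //   #define FOO 1
+  //   #define MIN(a,b) ((a) < (b) ? (a) : (b))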
+ PP.EnterMainSourceFile();
+
+ Token Tok;
+ do PP.Lex(Tok);
+ while (Tok.isNot(tok::eof));
+
+ std::vector<std::pair<IdentifierInfo*, MacroInfo*> > MacrosByID;
+ for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end();
+ I != E; ++I)
+ MacrosByID.push_back(*I);
+ std::sort(MacrosByID.begin(), MacrosByID.end(), SortMacrosByID());
+
+ for (unsigned i = 0, e = MacrosByID.size(); i != e; ++i) {
+ MacroInfo &MI = *MacrosByID[i].second;
+ // Ignore computed macros like __LINE__ and friends.
+ if (MI.isBuiltinMacro()) continue;
+
+ PrintMacroDefinition(*MacrosByID[i].first, MI, PP, *OS);
+ *OS << "\n";
+ }
+}
+
+/// DoPrintPreprocessedInput - This implements -E mode.
+///
+void clang::DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream *OS,
+ bool EnableCommentOutput,
+ bool EnableMacroCommentOutput,
+ bool DisableLineMarkers,
+ bool DumpDefines) {
+ // Inform the preprocessor whether we want it to retain comments or not, due
+ // to -C or -CC.
+ PP.SetCommentRetentionState(EnableCommentOutput, EnableMacroCommentOutput);
+
+ OS->SetBufferSize(64*1024);
+
+ PrintPPOutputPPCallbacks *Callbacks =
+ new PrintPPOutputPPCallbacks(PP, *OS, DisableLineMarkers, DumpDefines);
+ PP.AddPragmaHandler(0, new UnknownPragmaHandler("#pragma", Callbacks));
+ PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC",
+ Callbacks));
+
+ PP.setPPCallbacks(Callbacks);
+
+ // After we have configured the preprocessor, enter the main file.
+ PP.EnterMainSourceFile();
+
+ // Consume all of the tokens that come from the predefines buffer. Those
+ // should not be emitted into the output and are guaranteed to be at the
+ // start.
+ const SourceManager &SourceMgr = PP.getSourceManager();
+ Token Tok;
+ do PP.Lex(Tok);
+ while (Tok.isNot(tok::eof) && Tok.getLocation().isFileID() &&
+ !strcmp(SourceMgr.getPresumedLoc(Tok.getLocation()).getFilename(),
+ "<built-in>"));
+
+ // Read all the preprocessed tokens, printing them out to the stream.
+ PrintPreprocessedTokens(PP, Tok, Callbacks, *OS);
+ *OS << '\n';
+}
+
diff --git a/lib/Frontend/RewriteBlocks.cpp b/lib/Frontend/RewriteBlocks.cpp
new file mode 100644
index 0000000..9d73d90
--- /dev/null
+++ b/lib/Frontend/RewriteBlocks.cpp
@@ -0,0 +1,1162 @@
+//===--- RewriteBlocks.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the closure rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <sstream>
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+
+class RewriteBlocks : public ASTConsumer {
+ Rewriter Rewrite;
+ Diagnostic &Diags;
+ const LangOptions &LangOpts;
+ unsigned RewriteFailedDiag;
+
+ ASTContext *Context;
+ SourceManager *SM;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+
+ // Block expressions.
+ llvm::SmallVector<BlockExpr *, 32> Blocks;
+ llvm::SmallVector<BlockDeclRefExpr *, 32> BlockDeclRefs;
+ llvm::DenseMap<BlockDeclRefExpr *, CallExpr *> BlockCallExprs;
+
+ // Block related declarations.
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+
+ // The function/method we are rewriting.
+ FunctionDecl *CurFunctionDef;
+ ObjCMethodDecl *CurMethodDef;
+
+ bool IsHeader;
+
+ std::string Preamble;
+public:
+ RewriteBlocks(std::string inFile, Diagnostic &D,
+ const LangOptions &LOpts);
+ ~RewriteBlocks() {
+ // Get the buffer corresponding to MainFileID.
+ // If we haven't changed it, then we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ std::string S(RewriteBuf->begin(), RewriteBuf->end());
+ printf("%s\n", S.c_str());
+ } else {
+ printf("No changes\n");
+ }
+ }
+
+ void Initialize(ASTContext &context);
+
+ void InsertText(SourceLocation Loc, const char *StrData, unsigned StrLen);
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ const char *NewStr, unsigned NewLength);
+
+ // Top Level Driver code.
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ }
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+
+ // Top level
+ Stmt *RewriteFunctionBody(Stmt *S);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ // Block specific rewrite rules.
+ std::string SynthesizeBlockInitExpr(BlockExpr *Exp, VarDecl *VD=0);
+
+ void RewriteBlockCall(CallExpr *Exp);
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteBlockDeclRefExpr(BlockDeclRefExpr *VD);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ const char *funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ const char *funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ bool hasCopyDisposeHelpers);
+ std::string SynthesizeBlockCall(CallExpr *Exp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ const char *FunName);
+
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockCallExprs(Stmt *S);
+ void GetBlockDeclRefExprs(Stmt *S);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isBlockPointerType(QualType T) { return isa<BlockPointerType>(T); }
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAsPointerType()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ isa<ObjCQualifiedIdType>(PT->getPointeeType()))
+ return true;
+ }
+ return false;
+ }
+ // ObjC rewrite methods.
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl);
+ void RewriteCategoryDecl(ObjCCategoryDecl *CatDecl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *PDecl);
+ void RewriteMethodDecl(ObjCMethodDecl *MDecl);
+
+ void RewriteFunctionProtoType(QualType funcType, NamedDecl *D);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void RewriteCastExpr(CastExpr *CE);
+
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen, const char *&RParen);
+};
+
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+ // C++ header: .hh or .H;
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteBlocks::RewriteBlocks(std::string inFile,
+ Diagnostic &D, const LangOptions &LOpts) :
+ Diags(D), LangOpts(LOpts) {
+ IsHeader = IsHeaderFile(inFile);
+ CurFunctionDef = 0;
+ CurMethodDef = 0;
+ RewriteFailedDiag = Diags.getCustomDiagID(Diagnostic::Warning,
+ "rewriting failed");
+}
+
+ASTConsumer *clang::CreateBlockRewriter(const std::string& InFile,
+ Diagnostic &Diags,
+ const LangOptions &LangOpts) {
+ return new RewriteBlocks(InFile, Diags, LangOpts);
+}
+
+void RewriteBlocks::Initialize(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), LangOpts);
+
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Size;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "enum {\n";
+ Preamble += " BLOCK_HAS_COPY_DISPOSE = (1<<25),\n";
+ Preamble += " BLOCK_IS_GLOBAL = (1<<28)\n";
+ Preamble += "};\n";
+ if (LangOpts.Microsoft)
+ Preamble += "#define __OBJC_RW_EXTERN extern \"C\" __declspec(dllimport)\n";
+ else
+ Preamble += "#define __OBJC_RW_EXTERN extern\n";
+ Preamble += "// Runtime copy/destroy helper functions\n";
+ Preamble += "__OBJC_RW_EXTERN void _Block_copy_assign(void *, void *);\n";
+ Preamble += "__OBJC_RW_EXTERN void _Block_byref_assign_copy(void *, void *);\n";
+ Preamble += "__OBJC_RW_EXTERN void _Block_destroy(void *);\n";
+ Preamble += "__OBJC_RW_EXTERN void _Block_byref_release(void *);\n";
+ Preamble += "__OBJC_RW_EXTERN void *_NSConcreteGlobalBlock;\n";
+ Preamble += "__OBJC_RW_EXTERN void *_NSConcreteStackBlock;\n";
+ Preamble += "#endif\n";
+
+ InsertText(SM->getLocForStartOfFile(MainFileID),
+ Preamble.c_str(), Preamble.size());
+}
+
+void RewriteBlocks::InsertText(SourceLocation Loc, const char *StrData,
+ unsigned StrLen)
+{
+ if (!Rewrite.InsertText(Loc, StrData, StrLen))
+ return;
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+}
+
+void RewriteBlocks::ReplaceText(SourceLocation Start, unsigned OrigLength,
+ const char *NewStr, unsigned NewLength) {
+ if (!Rewrite.ReplaceText(Start, OrigLength, NewStr, NewLength))
+ return;
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+}
+
+void RewriteBlocks::RewriteMethodDecl(ObjCMethodDecl *Method) {
+ bool haveBlockPtrs = false;
+ for (ObjCMethodDecl::param_iterator I = Method->param_begin(),
+ E = Method->param_end(); I != E; ++I)
+ if (isBlockPointerType((*I)->getType()))
+ haveBlockPtrs = true;
+
+ if (!haveBlockPtrs)
+ return;
+
+ // Do a fuzzy rewrite.
+ // We have 1 or more arguments that have closure pointers.
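+  // For example, a parameter written as (void (^)(int)) in the selector is
+  // textually replaced with (struct __block_impl *).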
+ SourceLocation Loc = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+ const char *methodPtr = startBuf;
+ std::string Tag = "struct __block_impl *";
+
+ while (*methodPtr++ && (methodPtr != endBuf)) {
+ switch (*methodPtr) {
+ case ':':
+ methodPtr++;
+ if (*methodPtr == '(') {
+ const char *scanType = ++methodPtr;
+ bool foundBlockPointer = false;
+ unsigned parenCount = 1;
+
+ while (parenCount) {
+ switch (*scanType) {
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ case '^':
+ foundBlockPointer = true;
+ break;
+ }
+ scanType++;
+ }
+ if (foundBlockPointer) {
+ // advance the location to startArgList.
+ Loc = Loc.getFileLocWithOffset(methodPtr-startBuf);
+ assert((Loc.isValid()) && "Invalid Loc");
+ ReplaceText(Loc, scanType-methodPtr-1, Tag.c_str(), Tag.size());
+
+ // Advance startBuf. Since the underlying buffer has changed,
+ // it's very important to advance startBuf (so we can correctly
+ // compute a relative Loc the next time around).
+ startBuf = methodPtr;
+ }
+ // Advance the method ptr to the end of the type.
+ methodPtr = scanType;
+ }
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteBlocks::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ for (ObjCInterfaceDecl::instmeth_iterator
+ I = ClassDecl->instmeth_begin(*Context),
+ E = ClassDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = ClassDecl->classmeth_begin(*Context),
+ E = ClassDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+}
+
+void RewriteBlocks::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ for (ObjCCategoryDecl::instmeth_iterator
+ I = CatDecl->instmeth_begin(*Context),
+ E = CatDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+ for (ObjCCategoryDecl::classmeth_iterator
+ I = CatDecl->classmeth_begin(*Context),
+ E = CatDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+}
+
+void RewriteBlocks::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(*Context),
+ E = PDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(*Context),
+ E = PDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDecl(*I);
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteBlocks::HandleTopLevelSingleDecl(Decl *D) {
+ // Two cases: either the decl could be in the main file, or it could be in a
+  // #included file.  If the former, rewrite it now.  If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getInstantiationLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+ if (ObjCInterfaceDecl *MD = dyn_cast<ObjCInterfaceDecl>(D))
+ RewriteInterfaceDecl(MD);
+ else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D))
+ RewriteCategoryDecl(CD);
+ else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D))
+ RewriteProtocolDecl(PD);
+
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isFromMainFile(Loc))
+ HandleDeclInMainFile(D);
+ return;
+}
+
+std::string RewriteBlocks::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ const char *funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getResultType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString() + " __" +
+ funcName + "_" + "block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ S += "()";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().getAsStringInternal(ParamStr, Context->PrintingPolicy);
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ Context->getPointerType((*I)->getType()).getAsStringInternal(Name,
+ Context->PrintingPolicy);
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isBlockPointerType((*I)->getType()))
+ S += "struct __block_impl *";
+ else
+ (*I)->getType().getAsStringInternal(Name, Context->PrintingPolicy);
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
+
+std::string RewriteBlocks::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ const char *funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ S += "_Block_copy_assign(&dst->";
+ S += (*I)->getNameAsString();
+ S += ", src->";
+ S += (*I)->getNameAsString();
+ S += ");}";
+ }
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ S += "_Block_destroy(src->";
+ S += (*I)->getNameAsString();
+ S += ");";
+ }
+ S += "}\n";
+ return S;
+}
+
+std::string RewriteBlocks::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ bool hasCopyDisposeHelpers) {
+ std::string S = "struct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+
+ if (hasCopyDisposeHelpers)
+ S += " void *copy;\n void *dispose;\n";
+
+ Constructor += "(void *fp";
+
+ if (hasCopyDisposeHelpers)
+ Constructor += ", void *copyHelp, void *disposeHelp";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ (*I)->getType().getAsStringInternal(FieldName, Context->PrintingPolicy);
+ (*I)->getType().getAsStringInternal(ArgName, Context->PrintingPolicy);
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ Context->getPointerType((*I)->getType()).getAsStringInternal(FieldName,
+ Context->PrintingPolicy);
+ Context->getPointerType((*I)->getType()).getAsStringInternal(ArgName,
+ Context->PrintingPolicy);
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ // FIXME: handle NSConcreteGlobalBlock.
+ Constructor += ", int flags=0) {\n";
+ Constructor += " impl.isa = 0/*&_NSConcreteStackBlock*/;\n impl.Size = sizeof(";
+ Constructor += Tag + ");\n impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ if (hasCopyDisposeHelpers)
+ Constructor += " copy = copyHelp;\n dispose = disposeHelp;\n";
+
+ // Initialize all "by copy" arguments.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ Constructor += " ";
+ if (isBlockPointerType((*I)->getType()))
+ Constructor += Name + " = (struct __block_impl *)_";
+ else
+ Constructor += Name + " = _";
+ Constructor += Name + ";\n";
+ }
+ // Initialize all "by ref" arguments.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ Constructor += " ";
+ if (isBlockPointerType((*I)->getType()))
+ Constructor += Name + " = (struct __block_impl *)_";
+ else
+ Constructor += Name + " = _";
+ Constructor += Name + ";\n";
+ }
+ } else {
+ // Finish writing the constructor.
+ // FIXME: handle NSConcreteGlobalBlock.
+ Constructor += ", int flags=0) {\n";
+ Constructor += " impl.isa = 0/*&_NSConcreteStackBlock*/;\n impl.Size = sizeof(";
+ Constructor += Tag + ");\n impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ if (hasCopyDisposeHelpers)
+ Constructor += " copy = copyHelp;\n dispose = disposeHelp;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
+
+void RewriteBlocks::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ const char *FunName) {
+ // Insert closures that were part of the function.
+ for (unsigned i = 0; i < Blocks.size(); i++) {
+
+ CollectBlockDeclRefInfo(Blocks[i]);
+
+ std::string Tag = "__" + std::string(FunName) + "_block_impl_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], Tag,
+ ImportedBlockDecls.size() > 0);
+
+ InsertText(FunLocStart, CI.c_str(), CI.size());
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, Tag);
+
+ InsertText(FunLocStart, CF.c_str(), CF.size());
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, Tag);
+ InsertText(FunLocStart, HF.c_str(), HF.size());
+ }
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByCopyDecls.clear();
+ BlockCallExprs.clear();
+ ImportedBlockDecls.clear();
+ }
+ Blocks.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteBlocks::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const char *FuncName = FD->getNameAsCString();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteBlocks::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName = MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = FuncName.find(":", loc)) != std::string::npos)
+ FuncName.replace(loc, 1, "_");
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName.c_str());
+}
+
+void RewriteBlocks::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(*CI);
+ }
+ // Handle specific things.
+ if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(S))
+ // FIXME: Handle enums.
+ if (!isa<FunctionDecl>(CDRE->getDecl()))
+ BlockDeclRefs.push_back(CDRE);
+ return;
+}
+
+void RewriteBlocks::GetBlockCallExprs(Stmt *S) {
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockCallExprs(CBE->getBody());
+ else
+ GetBlockCallExprs(*CI);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ BlockCallExprs[dyn_cast<BlockDeclRefExpr>(CE->getCallee())] = CE;
+ }
+ }
+ return;
+}
+
+std::string RewriteBlocks::SynthesizeBlockCall(CallExpr *Exp) {
+ // Navigate to relevant type information.
+ const char *closureName = 0;
+ const BlockPointerType *CPT = 0;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp->getCallee())) {
+ closureName = DRE->getDecl()->getNameAsCString();
+ CPT = DRE->getType()->getAsBlockPointerType();
+ } else if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(Exp->getCallee())) {
+ closureName = CDRE->getDecl()->getNameAsCString();
+ CPT = CDRE->getType()->getAsBlockPointerType();
+ } else if (MemberExpr *MExpr = dyn_cast<MemberExpr>(Exp->getCallee())) {
+ closureName = MExpr->getMemberDecl()->getNameAsCString();
+ CPT = MExpr->getType()->getAsBlockPointerType();
+ } else {
+    assert(0 && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAsFunctionType();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
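+  // For a block 'int (^blk)(int)', a call 'blk(10)' ends up rewritten roughly
+  // as:
+  //   ((int (*)(struct __block_impl *, int))
+  //       ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 10)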
+ // Build a closure call - start with a paren expr to enforce precedence.
+ std::string BlockCall = "(";
+
+ // Synthesize the cast.
+ BlockCall += "(" + Exp->getType().getAsString() + "(*)";
+ BlockCall += "(struct __block_impl *";
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I)
+ BlockCall += ", " + (*I).getAsString();
+ }
+ BlockCall += "))"; // close the argument list and paren expression.
+
+ // Invoke the closure. We need to cast it since the declaration type is
+ // bogus (it's a function pointer type)
+ BlockCall += "((struct __block_impl *)";
+ std::string closureExprBufStr;
+ llvm::raw_string_ostream closureExprBuf(closureExprBufStr);
+ Exp->getCallee()->printPretty(closureExprBuf, *Context);
+ BlockCall += closureExprBuf.str();
+ BlockCall += ")->FuncPtr)";
+
+ // Add the arguments.
+ BlockCall += "((struct __block_impl *)";
+ BlockCall += closureExprBuf.str();
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ std::string syncExprBufS;
+ llvm::raw_string_ostream Buf(syncExprBufS);
+ (*I)->printPretty(Buf, *Context);
+ BlockCall += ", " + Buf.str();
+ }
+ return BlockCall;
+}
+
+void RewriteBlocks::RewriteBlockCall(CallExpr *Exp) {
+ std::string BlockCall = SynthesizeBlockCall(Exp);
+
+ const char *startBuf = SM->getCharacterData(Exp->getLocStart());
+ const char *endBuf = SM->getCharacterData(Exp->getLocEnd());
+
+ ReplaceText(Exp->getLocStart(), endBuf-startBuf,
+ BlockCall.c_str(), BlockCall.size());
+}
+
+void RewriteBlocks::RewriteBlockDeclRefExpr(BlockDeclRefExpr *BDRE) {
+ // FIXME: Add more elaborate code generation required by the ABI.
+ InsertText(BDRE->getLocStart(), "*", 1);
+}
+
+void RewriteBlocks::RewriteCastExpr(CastExpr *CE) {
+ SourceLocation LocStart = CE->getLocStart();
+ SourceLocation LocEnd = CE->getLocEnd();
+
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+ // advance the location to startArgList.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getFileLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*", 1);
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteBlocks::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getFileLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getFileLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*", 1);
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteBlocks::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAsPointerType();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAsFunctionProtoType();
+ } else {
+ const BlockPointerType *BPT = QT->getAsBlockPointerType();
+ assert(BPT && "PointerTypeTakesAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAsFunctionProtoType();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I)
+ if (isBlockPointerType(*I))
+ return true;
+ }
+ return false;
+}
+
+void RewriteBlocks::GetExtentOfArgList(const char *Name,
+ const char *&LParen, const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteBlocks::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefDecl *TDD = dyn_cast<TypedefDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ assert(0 && "RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ DeclLoc = DeclLoc.getFileLocWithOffset(startBuf-endBuf);
+ ReplaceText(DeclLoc, 1, "*", 1);
+ }
+ if (PointerTypeTakesAnyBlockArguments(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^') {
+ SourceLocation CaretLoc = DeclLoc.getFileLocWithOffset(argListBegin-startBuf);
+ ReplaceText(CaretLoc, 1, "*", 1);
+ }
+ argListBegin++;
+ }
+ }
+ return;
+}
+
+void RewriteBlocks::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->isByRef())
+ BlockByCopyDecls.insert(BlockDeclRefs[i]->getDecl());
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->isByRef()) {
+ BlockByRefDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (isBlockPointerType(BlockDeclRefs[i]->getType())) {
+ GetBlockCallExprs(Blocks[i]);
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+ }
+}
+
+std::string RewriteBlocks::SynthesizeBlockInitExpr(BlockExpr *Exp, VarDecl *VD) {
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = std::string(CurFunctionDef->getNameAsString());
+ else if (CurMethodDef) {
+ FuncName = CurMethodDef->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = FuncName.find(":", loc)) != std::string::npos)
+ FuncName.replace(loc, 1, "_");
+ } else if (VD)
+ FuncName = std::string(VD->getNameAsString());
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber;
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ std::string FunkTypeStr;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ Context->getPointerType(QualType(Exp->getFunctionType(),0))
+ .getAsStringInternal(FunkTypeStr, Context->PrintingPolicy);
+
+ // Rewrite the closure block with a compound literal. The first cast is
+ // to prevent warnings from the C compiler.
+ std::string Init = "(" + FunkTypeStr;
+
+ Init += ")&" + Tag;
+
+ // Initialize the block function.
+ Init += "((void*)" + Func;
+
+ if (ImportedBlockDecls.size()) {
+ std::string Buf = "__" + FuncName + "_block_copy_" + BlockNumber;
+ Init += ",(void*)" + Buf;
+ Buf = "__" + FuncName + "_block_dispose_" + BlockNumber;
+ Init += ",(void*)" + Buf;
+ }
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ Init += ",";
+ if (isObjCType((*I)->getType())) {
+ Init += "[[";
+ Init += (*I)->getNameAsString();
+ Init += " retain] autorelease]";
+ } else if (isBlockPointerType((*I)->getType())) {
+ Init += "(void *)";
+ Init += (*I)->getNameAsString();
+ } else {
+ Init += (*I)->getNameAsString();
+ }
+ }
+ // Output all "by ref" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ Init += ",&";
+ Init += (*I)->getNameAsString();
+ }
+ }
+ Init += ")";
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByCopyDecls.clear();
+ ImportedBlockDecls.clear();
+
+ return Init;
+}
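+
+// Illustrative example for SynthesizeBlockInitExpr above (hypothetical
+// names): inside a function 'foo', a block of type 'int (^)(void)' that
+// captures a local 'x' by copy is initialized with roughly
+//
+//   (int (*)(void))&__foo_block_impl_0((void*)__foo_block_func_0, x)
+//
+// with copy/dispose helper pointers inserted before the captured values
+// when the block imports other blocks.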
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+Stmt *RewriteBlocks::RewriteFunctionBody(Stmt *S) {
+ // Start by rewriting all children.
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
+ Stmt *newStmt = RewriteFunctionBody(CBE->getBody());
+ if (newStmt)
+ *CI = newStmt;
+
+ // We've just rewritten the block body in place.
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string S = Rewrite.getRewritenText(CBE->getSourceRange());
+ RewrittenBlockExprs[CBE] = S;
+ std::string Init = SynthesizeBlockInitExpr(CBE);
+ // Do the rewrite, using S.size() which contains the rewritten size.
+ ReplaceText(CBE->getLocStart(), S.size(), Init.c_str(), Init.size());
+ } else {
+ Stmt *newStmt = RewriteFunctionBody(*CI);
+ if (newStmt)
+ *CI = newStmt;
+ }
+ }
+ // Handle specific things.
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType())
+ RewriteBlockCall(CE);
+ }
+ if (CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ }
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(SD)) {
+ if (isBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+ // Handle specific things.
+ if (BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ if (BDRE->isByRef())
+ RewriteBlockDeclRefExpr(BDRE);
+ }
+ // Return this stmt unmodified.
+ return S;
+}
+
+void RewriteBlocks::RewriteFunctionProtoType(QualType funcType, NamedDecl *D) {
+ if (FunctionProtoType *fproto = dyn_cast<FunctionProtoType>(funcType)) {
+ for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
+ E = fproto->arg_type_end(); I && (I != E); ++I)
+ if (isBlockPointerType(*I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteBlocks::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAsPointerType();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteBlocks::HandleDeclInMainFile(Decl *D) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteFunctionProtoType(FD->getType(), FD);
+
+ // FIXME: Handle CXXTryStmt
+ if (CompoundStmt *Body = FD->getCompoundBody(*Context)) {
+ CurFunctionDef = FD;
+ FD->setBody(cast_or_null<CompoundStmt>(RewriteFunctionBody(Body)));
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ }
+ return;
+ }
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ RewriteMethodDecl(MD);
+ if (Stmt *Body = MD->getBody(*Context)) {
+ CurMethodDef = MD;
+ RewriteFunctionBody(Body);
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
+ }
+ }
+ if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (isBlockPointerType(VD->getType())) {
+ RewriteBlockPointerDecl(VD);
+ if (VD->getInit()) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(VD->getInit())) {
+ RewriteFunctionBody(CBE->getBody(*Context));
+
+ // We've just rewritten the block body in place.
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string S = Rewrite.getRewritenText(CBE->getSourceRange());
+ RewrittenBlockExprs[CBE] = S;
+ std::string Init = SynthesizeBlockInitExpr(CBE, VD);
+ // Do the rewrite, using S.size() which contains the rewritten size.
+ ReplaceText(CBE->getLocStart(), S.size(), Init.c_str(), Init.size());
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(),
+ VD->getNameAsCString());
+ } else if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CastExpr *CE = dyn_cast<CastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ }
+ return;
+ }
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ if (isBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ return;
+ }
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ if (RD->isDefinition()) {
+ for (RecordDecl::field_iterator i = RD->field_begin(*Context),
+ e = RD->field_end(*Context); i != e; ++i) {
+ FieldDecl *FD = *i;
+ if (isBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ }
+ }
+ return;
+ }
+}
diff --git a/lib/Frontend/RewriteMacros.cpp b/lib/Frontend/RewriteMacros.cpp
new file mode 100644
index 0000000..5ef4892
--- /dev/null
+++ b/lib/Frontend/RewriteMacros.cpp
@@ -0,0 +1,215 @@
+//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code rewrites macro invocations into their expansions. This gives you
+// a macro expanded file that retains comments and #includes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/System/Path.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+/// isSameToken - Return true if the two specified tokens have the same
+/// content.
+static bool isSameToken(Token &RawTok, Token &PPTok) {
+ // If two tokens have the same kind and the same identifier info, they are
+ // obviously the same.
+ if (PPTok.getKind() == RawTok.getKind() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ // Otherwise, if they are different but have the same identifier info, they
+ // are also considered to be the same. This allows keywords and raw lexed
+ // identifiers with the same name to be treated the same.
+ if (PPTok.getIdentifierInfo() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ return false;
+}
+
+
+/// GetNextRawTok - Return the next raw token in the stream, skipping over
+/// comments if ReturnComment is false.
+static const Token &GetNextRawTok(const std::vector<Token> &RawTokens,
+ unsigned &CurTok, bool ReturnComment) {
+ assert(CurTok < RawTokens.size() && "Overran eof!");
+
+ // If the client doesn't want comments and we have one, skip it.
+ if (!ReturnComment && RawTokens[CurTok].is(tok::comment))
+ ++CurTok;
+
+ return RawTokens[CurTok++];
+}
+
+
+/// LexRawTokensFromMainFile - Lex all the raw tokens from the main file into
+/// the specified vector.
+static void LexRawTokensFromMainFile(Preprocessor &PP,
+ std::vector<Token> &RawTokens) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a lexer to lex all the tokens of the main file in raw mode. Even
+ // though it is in raw mode, it will not return comments.
+ Lexer RawLex(SM.getMainFileID(), SM, PP.getLangOptions());
+
+ // Switch on comment lexing because we really do want them.
+ RawLex.SetCommentRetentionState(true);
+
+ Token RawTok;
+ do {
+ RawLex.LexFromRawLexer(RawTok);
+
+ // If we have an identifier with no identifier info for our raw token, look
+ // up the identifier info. This is important for equality comparison of
+ // identifier tokens.
+ if (RawTok.is(tok::identifier) && !RawTok.getIdentifierInfo())
+ RawTok.setIdentifierInfo(PP.LookUpIdentifierInfo(RawTok));
+
+ RawTokens.push_back(RawTok);
+ } while (RawTok.isNot(tok::eof));
+}
+
+
+/// RewriteMacrosInInput - Implement -rewrite-macros mode.
+void clang::RewriteMacrosInInput(Preprocessor &PP, llvm::raw_ostream *OS) {
+ SourceManager &SM = PP.getSourceManager();
+
+ Rewriter Rewrite;
+ Rewrite.setSourceMgr(SM, PP.getLangOptions());
+ RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID());
+
+ std::vector<Token> RawTokens;
+ LexRawTokensFromMainFile(PP, RawTokens);
+ unsigned CurRawTok = 0;
+ Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+
+
+ // Get the first preprocessing token.
+ PP.EnterMainSourceFile();
+ Token PPTok;
+ PP.Lex(PPTok);
+
+ // Preprocess the input file in parallel with raw lexing the main file. Ignore
+ // all tokens that are preprocessed from a file other than the main file (e.g.
+ // a header). If we see tokens that are in the preprocessed file but not the
+ // lexed file, we have a macro expansion. If we see tokens in the lexed file
+ // that aren't in the preprocessed view, we have macros that expand to no
+ // tokens, or macro arguments etc.
+ while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) {
+ SourceLocation PPLoc = SM.getInstantiationLoc(PPTok.getLocation());
+
+ // If PPTok is from a different source file, ignore it.
+ if (!SM.isFromMainFile(PPLoc)) {
+ PP.Lex(PPTok);
+ continue;
+ }
+
+ // If the raw file hits a preprocessor directive, there will be extra tokens
+ // in the raw file that don't exist in the preprocessed file. However, we
+ // choose to preserve them in the output file and otherwise handle them
+ // specially.
+ if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) {
+ // If this is a #warning directive or #pragma mark (GNU extensions),
+ // comment the line out.
+ if (RawTokens[CurRawTok].is(tok::identifier)) {
+ const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo();
+ if (!strcmp(II->getName(), "warning")) {
+ // Comment out #warning.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//", 2);
+ } else if (!strcmp(II->getName(), "pragma") &&
+ RawTokens[CurRawTok+1].is(tok::identifier) &&
+ !strcmp(RawTokens[CurRawTok+1].getIdentifierInfo()->getName(),
+ "mark")){
+ // Comment out #pragma mark.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//", 2);
+ }
+ }
+
+ // Otherwise, if this is a #include or some other directive, just leave it
+ // in the file by skipping over the line.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof))
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ continue;
+ }
+
+ // Okay, both tokens are from the same file. Get their offsets from the
+ // start of the file.
+ unsigned PPOffs = SM.getFileOffset(PPLoc);
+ unsigned RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ // If the offsets are the same and the token kind is the same, ignore them.
+ if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) {
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ PP.Lex(PPTok);
+ continue;
+ }
+
+ // If the PP token is farther along than the raw token, something was
+ // deleted. Comment out the raw token.
+ if (RawOffs <= PPOffs) {
+ // Comment out a whole run of tokens instead of bracketing each one with
+ // comments. Add a leading space if RawTok didn't have one.
+ bool HasSpace = RawTok.hasLeadingSpace();
+ RB.InsertTextAfter(RawOffs, " /*"+HasSpace, 2+!HasSpace);
+ unsigned EndPos;
+
+ do {
+ EndPos = RawOffs+RawTok.getLength();
+
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, true);
+ RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ if (RawTok.is(tok::comment)) {
+ // Skip past the comment.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ break;
+ }
+
+ } while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() &&
+ (PPOffs != RawOffs || !isSameToken(RawTok, PPTok)));
+
+ RB.InsertTextBefore(EndPos, "*/", 2);
+ continue;
+ }
+
+ // Otherwise, there was a replacement (an expansion). Insert the new tokens
+ // in the output buffer. Insert the whole run of new tokens at once to get
+ // them in the right order.
+ unsigned InsertPos = PPOffs;
+ std::string Expansion;
+ while (PPOffs < RawOffs) {
+ Expansion += ' ' + PP.getSpelling(PPTok);
+ PP.Lex(PPTok);
+ PPLoc = SM.getInstantiationLoc(PPTok.getLocation());
+ PPOffs = SM.getFileOffset(PPLoc);
+ }
+ Expansion += ' ';
+ RB.InsertTextBefore(InsertPos, &Expansion[0], Expansion.size());
+ }
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(SM.getMainFileID())) {
+ //printf("Changed:\n");
+ *OS << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ fprintf(stderr, "No changes\n");
+ }
+ OS->flush();
+}
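+
+// Illustrative example for RewriteMacrosInInput above (hypothetical input):
+// given
+//
+//   #define TWICE(x) ((x) + (x))
+//   int y = TWICE(3);  // this comment is preserved
+//
+// the raw token stream still contains 'TWICE(3)' while the preprocessed
+// stream contains '((3) + (3))', so the loop above comments out the raw
+// macro invocation and inserts the expanded tokens beside it. Comments,
+// #includes and other directives are left in place.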
diff --git a/lib/Frontend/RewriteObjC.cpp b/lib/Frontend/RewriteObjC.cpp
new file mode 100644
index 0000000..f382704
--- /dev/null
+++ b/lib/Frontend/RewriteObjC.cpp
@@ -0,0 +1,4693 @@
+//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteObjC : public ASTConsumer {
+ Rewriter Rewrite;
+ Diagnostic &Diags;
+ const LangOptions &LangOpts;
+ unsigned RewriteFailedDiag;
+ unsigned TryFinallyContainsReturnDiag;
+
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ SourceLocation LastIncLoc;
+
+ llvm::SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ llvm::SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ llvm::SmallVector<Stmt *, 32> Stmts;
+ llvm::SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ unsigned NumObjCStringLiterals;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperContructorFunctionDecl;
+
+ // ObjC string constant support.
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ TypeDecl *ProtocolTypeDecl;
+ QualType getProtocolType();
+
+ // Needed for header files being rewritten
+ bool IsHeader;
+
+ std::string InFileName;
+ llvm::raw_ostream* OutFile;
+
+ bool SilenceRewriteMacroWarning;
+
+ std::string Preamble;
+
+ // Block expressions.
+ llvm::SmallVector<BlockExpr *, 32> Blocks;
+ llvm::SmallVector<BlockDeclRefExpr *, 32> BlockDeclRefs;
+ llvm::DenseMap<BlockDeclRefExpr *, CallExpr *> BlockCallExprs;
+
+ // Block related declarations.
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+
+ // This maps a property to its assignment statement.
+ llvm::DenseMap<ObjCPropertyRefExpr *, BinaryOperator *> PropSetters;
+ // This maps a property to its synthesized message expression.
+ // This allows us to rewrite chained getters (e.g. o.a.b.c).
+ llvm::DenseMap<ObjCPropertyRefExpr *, Stmt *> PropGetters;
+
+ // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+ FunctionDecl *CurFunctionDef;
+ VarDecl *GlobalVarDecl;
+
+ bool DisableReplaceStmt;
+
+ static const int OBJC_ABI_VERSION = 7;
+ public:
+ virtual void Initialize(ASTContext &context);
+
+ // Top Level Driver code.
+ virtual void HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ }
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteObjC(std::string inFile, llvm::raw_ostream *OS,
+ Diagnostic &D, const LangOptions &LOpts,
+ bool silenceMacroWarn);
+
+ ~RewriteObjC() {}
+
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return; // Used when rewriting the assignment of a property setter.
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceStmt(Old, New)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, *Context);
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, &Str[0], Str.size())) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void InsertText(SourceLocation Loc, const char *StrData, unsigned StrLen,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, StrData, StrLen, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void RemoveText(SourceLocation Loc, unsigned StrLen) {
+ // If removal succeeded or warning disabled return with no warning.
+ if (!Rewrite.RemoveText(Loc, StrLen) || SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ const char *NewStr, unsigned NewLength) {
+ // If replacement succeeded or warning disabled, return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, NewStr, NewLength) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewritePrologue(SourceLocation Loc);
+ void RewriteInclude();
+ void RewriteTabs();
+ void RewriteForwardClassDecl(ObjCClassDecl *Dcl);
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *Dcl);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+ bool needToScanForQualifiers(QualType T);
+ ObjCInterfaceDecl *isSuperReceiver(Expr *recExpr);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ void CollectPropertySetters(Stmt *S);
+
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV, SourceLocation OrigStart);
+ Stmt *RewritePropertyGetter(ObjCPropertyRefExpr *PropRefExpr);
+ Stmt *RewritePropertySetter(BinaryOperator *BinOp, Expr *newStmt,
+ SourceRange SrcRange);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ void WarnAboutReturnGotoContinueOrBreakStmts(Stmt *S);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCCatchStmt(ObjCAtCatchStmt *S);
+ Stmt *RewriteObjCFinallyStmt(ObjCAtFinallyStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ Expr **args, unsigned nargs);
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void SynthCountByEnumWithState(std::string &buf);
+
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperContructorFunctionDecl();
+
+ // Metadata emission.
+ void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+
+ void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ const char *prefix,
+ const char *ClassName,
+ std::string &Result);
+
+ void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ const char *prefix,
+ const char *ClassName,
+ std::string &Result);
+ void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
+ const char *prefix,
+ const char *ClassName,
+ std::string &Result);
+ void SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+ void SynthesizeIvarOffsetComputation(ObjCImplementationDecl *IDecl,
+ ObjCIvarDecl *ivar,
+ std::string &Result);
+ void RewriteImplementations();
+ void SynthesizeMetaDataIntoBuffer(std::string &Result);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ // Block specific rewrite rules.
+ void RewriteBlockCall(CallExpr *Exp);
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(BlockDeclRefExpr *VD);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ const char *funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ const char *funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ bool hasCopyDisposeHelpers);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ const char *FunName);
+
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockCallExprs(Stmt *S);
+ void GetBlockDeclRefExprs(Stmt *S);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAsPointerType()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ isa<ObjCQualifiedIdType>(PT->getPointeeType()))
+ return true;
+ }
+ return false;
+ }
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+
+ FunctionDecl *SynthBlockInitFunctionDecl(const char *name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp);
+
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for(unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
+ };
+}
+
+void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (FunctionProtoType *fproto = dyn_cast<FunctionProtoType>(funcType)) {
+ for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
+ E = fproto->arg_type_end(); I && (I != E); ++I)
+ if (isTopLevelBlockPointerType(*I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAsPointerType();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+ // C++ header: .hh or .H;
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteObjC::RewriteObjC(std::string inFile, llvm::raw_ostream* OS,
+ Diagnostic &D, const LangOptions &LOpts,
+ bool silenceMacroWarn)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(Diagnostic::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(Diagnostic::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile,
+ llvm::raw_ostream* OS,
+ Diagnostic &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return new RewriteObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+}
+
+void RewriteObjC::Initialize(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = 0;
+ MsgSendSuperFunctionDecl = 0;
+ MsgSendStretFunctionDecl = 0;
+ MsgSendSuperStretFunctionDecl = 0;
+ MsgSendFpretFunctionDecl = 0;
+ GetClassFunctionDecl = 0;
+ GetMetaClassFunctionDecl = 0;
+ SelGetUidFunctionDecl = 0;
+ CFStringFunctionDecl = 0;
+ ConstantStringClassReference = 0;
+ NSStringRecord = 0;
+ CurMethodDef = 0;
+ CurFunctionDef = 0;
+ GlobalVarDecl = 0;
+ SuperStructDecl = 0;
+ ProtocolTypeDecl = 0;
+ ConstantStringDecl = 0;
+ BcLabelCount = 0;
+ SuperContructorFunctionDecl = 0;
+ NumObjCStringLiterals = 0;
+ PropParentMap = 0;
+ CurrentBody = 0;
+ DisableReplaceStmt = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOptions());
+
+ // Declaring objc_selector outside the parameter list removes a silly
+ // scope-related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { struct objc_object *object; ";
+ Preamble += "struct objc_object *superClass; ";
+ if (LangOpts.Microsoft) {
+ // Add a constructor for creating temporary objects.
+ Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
+ ": ";
+ Preamble += "object(o), superClass(s) {} ";
+ }
+ Preamble += "};\n";
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.Microsoft) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ } else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend_stret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper_stret";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
+ Preamble += "(struct objc_class *, struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Size;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "__OBJC_RW_STATICIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_STATICIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_STATICIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_STATICIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ if (LangOpts.Microsoft) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#define __attribute__(X)\n";
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
+ // Two cases: either the decl could be in the main file, or it could be in a
+ // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getInstantiationLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+ // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (strcmp(FVD->getNameAsCString(), "_NSConstantStringClassReference") == 0) {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCInterfaceDecl *MD = dyn_cast<ObjCInterfaceDecl>(D)) {
+ RewriteInterfaceDecl(MD);
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ RewriteProtocolDecl(PD);
+ } else if (ObjCForwardProtocolDecl *FP =
+ dyn_cast<ObjCForwardProtocolDecl>(D)){
+ RewriteForwardProtocolDecl(FP);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(*Context),
+ DIEnd = LSD->decls_end(*Context);
+ DI != DIEnd; ++DI)
+ HandleTopLevelSingleDecl(*DI);
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isFromMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ std::pair<const char*, const char*> MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.first;
+ const char *MainBufEnd = MainBuf.second;
+ size_t ImportLen = strlen("import");
+ size_t IncludeLen = strlen("include");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getFileLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include", IncludeLen);
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
+
+void RewriteObjC::RewriteTabs() {
+ std::pair<const char*, const char*> MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.first;
+ const char *MainBufEnd = MainBuf.second;
+
+ // Loop over the whole file, looking for tabs.
+ for (const char *BufPtr = MainBufStart; BufPtr != MainBufEnd; ++BufPtr) {
+ if (*BufPtr != '\t')
+ continue;
+
+ // Okay, we found a tab. This tab will turn into at least one character,
+ // but it depends on which 'virtual column' it is in. Compute that now.
+ unsigned VCol = 0;
+ while (BufPtr-VCol != MainBufStart && BufPtr[-VCol-1] != '\t' &&
+ BufPtr[-VCol-1] != '\n' && BufPtr[-VCol-1] != '\r')
+ ++VCol;
+
+ // Okay, now that we know the virtual column, we know how many spaces to
+ // insert. We assume 8-character tab-stops.
+ unsigned Spaces = 8-(VCol & 7);
+
+ // Get the location of the tab.
+ SourceLocation TabLoc = SM->getLocForStartOfFile(MainFileID);
+ TabLoc = TabLoc.getFileLocWithOffset(BufPtr-MainBufStart);
+
+ // Rewrite the single tab character into a sequence of spaces.
+ ReplaceText(TabLoc, 1, " ", Spaces);
+ }
+}
+
+static std::string getIvarAccessString(ObjCInterfaceDecl *ClassDecl,
+ ObjCIvarDecl *OID) {
+ std::string S;
+ S = "((struct ";
+ S += ClassDecl->getIdentifier()->getName();
+ S += "_IMPL *)self)->";
+ S += OID->getNameAsCString();
+ return S;
+}
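+
+// Illustrative example for getIvarAccessString above (hypothetical names):
+// for class 'Foo' and ivar 'count' it produces
+// "((struct Foo_IMPL *)self)->count".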
+
+void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ", 3);
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ SourceLocation onePastSemiLoc =
+ startLoc.getFileLocWithOffset(semiBuf-startBuf+1);
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCInterfaceDecl *ClassDecl = PD->getGetterMethodDecl()->getClassInterface();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+
+ if (!OID)
+ return;
+
+ std::string Getr;
+ RewriteObjCMethodDecl(PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // FIXME: deal with code generation implications for various property
+ // attributes (copy, retain, nonatomic).
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ Getr += "return " + getIvarAccessString(ClassDecl, OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr.c_str(), Getr.size());
+ if (PD->isReadOnly())
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ RewriteObjCMethodDecl(PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // FIXME: deal with code generation implications for various property
+ // attributes (copy, retain, nonatomic).
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ Setr += getIvarAccessString(ClassDecl, OID) + " = ";
+ Setr += PD->getNameAsCString();
+ Setr += "; }";
+ InsertText(onePastSemiLoc, Setr.c_str(), Setr.size());
+}
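+
+// Illustrative example for RewritePropertyImplDecl above (hypothetical
+// names): '@synthesize age;' backed by ivar 'age' in class 'Person' is
+// commented out and followed by roughly
+//
+//   static int _I_Person_age(Person * self, SEL _cmd) {
+//     return ((struct Person_IMPL *)self)->age; }
+//   static void _I_Person_setAge_(Person * self, SEL _cmd, int age) {
+//     ((struct Person_IMPL *)self)->age = age; }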
+
+void RewriteObjC::RewriteForwardClassDecl(ObjCClassDecl *ClassDecl) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = ClassDecl->getLocation();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+
+ // Translate to typedefs that forward-reference structs with the same name
+ // as the class. As a convenience, we include the original declaration
+ // as a comment.
+ std::string typedefString;
+ typedefString += "// ";
+ typedefString.append(startBuf, semiPtr-startBuf+1);
+ typedefString += "\n";
+ for (ObjCClassDecl::iterator I = ClassDecl->begin(), E = ClassDecl->end();
+ I != E; ++I) {
+ ObjCInterfaceDecl *ForwardDecl = *I;
+ typedefString += "#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+ }
+
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1,
+ typedefString.c_str(), typedefString.size());
+}
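+
+// Illustrative example for RewriteForwardClassDecl above: '@class Foo;' is
+// replaced with
+//
+//   // @class Foo;
+//   #ifndef _REWRITER_typedef_Foo
+//   #define _REWRITER_typedef_Foo
+//   typedef struct objc_object Foo;
+//   #endif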
+
+void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getInstantiationLineNumber(LocEnd) >
+ SM->getInstantiationLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n", 6);
+ ReplaceText(LocEnd, 1, ";\n#endif\n", 9);
+ } else {
+ InsertText(LocStart, "// ", 3);
+ }
+}
+
+void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop)
+{
+ SourceLocation Loc = prop->getLocation();
+
+ ReplaceText(Loc, 0, "// ", 3);
+
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ", 3);
+
+ for (ObjCCategoryDecl::instmeth_iterator
+ I = CatDecl->instmeth_begin(*Context),
+ E = CatDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCCategoryDecl::classmeth_iterator
+ I = CatDecl->classmeth_begin(*Context),
+ E = CatDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndLoc(), 0, "// ", 3);
+}
+
+void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ std::pair<const char*, const char*> MainBuf = SM->getBufferData(MainFileID);
+
+ SourceLocation LocStart = PDecl->getLocStart();
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ", 3);
+
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(*Context),
+ E = PDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(*Context),
+ E = PDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndLoc();
+ ReplaceText(LocEnd, 0, "// ", 3);
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ std::string CommentedOptional = "/* @optional */";
+ SourceLocation OptionalLoc = LocStart.getFileLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"),
+ CommentedOptional.c_str(), CommentedOptional.size());
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ std::string CommentedRequired = "/* @required */";
+ SourceLocation OptionalLoc = LocStart.getFileLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"),
+ CommentedRequired.c_str(), CommentedRequired.size());
+
+ }
+ }
+}
+
+void RewriteObjC::RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocation();
+ if (LocStart.isInvalid())
+ assert(false && "Invalid SourceLocation");
+ // FIXME: handle forward protocol that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ", 3);
+}
+
+void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = 0;
+ ResultStr += "\nstatic ";
+ if (OMD->getResultType()->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (OMD->getResultType()->isFunctionPointerType() ||
+ OMD->getResultType()->isBlockPointerType()) {
+ // Needs special handling, since pointer-to-function types have special
+ // syntax (where a declaration models use).
+ QualType retType = OMD->getResultType();
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAsPointerType())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAsBlockPointerType())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAsFunctionType())) {
+ ResultStr += FPRetType->getResultType().getAsString();
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += OMD->getResultType().getAsString();
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += OMD->getClassInterface()->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(OMD->getClassInterface());
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.Microsoft) {
+ if (ObjCSynthesizedStructs.count(OMD->getClassInterface()))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += OMD->getClassInterface()->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString();
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString();
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PDecl = *PI;
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ if (isTopLevelBlockPointerType(PDecl->getType())) {
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ const BlockPointerType *BPT = PDecl->getType()->getAsBlockPointerType();
+ Context->getPointerType(BPT->getPointeeType()).getAsStringInternal(Name,
+ Context->PrintingPolicy);
+ } else
+ PDecl->getType().getAsStringInternal(Name, Context->PrintingPolicy);
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString();
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
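+
+// Illustrative example for RewriteObjCMethodDecl above (hypothetical
+// class/selector): the instance method '- (void)setAge:(int)a' of class
+// 'Person' yields a signature of roughly
+//
+//   static void _I_Person_setAge_(Person * self, SEL _cmd, int a)
+//
+// (a 'struct ' prefix is added before 'Person' once the internal struct has
+// been synthesized), and the name is remembered in MethodInternalNames.
+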
+void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ if (IMD)
+ InsertText(IMD->getLocStart(), "// ", 3);
+ else
+ InsertText(CID->getLocStart(), "// ", 3);
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ I = IMD ? IMD->instmeth_begin(*Context) : CID->instmeth_begin(*Context),
+ E = IMD ? IMD->instmeth_end(*Context) : CID->instmeth_end(*Context);
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody(*Context)->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf,
+ ResultStr.c_str(), ResultStr.size());
+ }
+
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ I = IMD ? IMD->classmeth_begin(*Context) : CID->classmeth_begin(*Context),
+ E = IMD ? IMD->classmeth_end(*Context) : CID->classmeth_end(*Context);
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody(*Context)->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf,
+ ResultStr.c_str(), ResultStr.size());
+ }
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ I = IMD ? IMD->propimpl_begin(*Context) : CID->propimpl_begin(*Context),
+ E = IMD ? IMD->propimpl_end(*Context) : CID->propimpl_end(*Context);
+ I != E; ++I) {
+ RewritePropertyImplDecl(*I, IMD, CID);
+ }
+
+ if (IMD)
+ InsertText(IMD->getLocEnd(), "// ", 3);
+ else
+ InsertText(CID->getLocEnd(), "// ", 3);
+}
+
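+// RewriteInterfaceDecl - Emit a guarded "typedef struct objc_object <Name>;"
+// for the class (unless a forward declaration was already seen), synthesize its
+// internal C struct, rewrite its property and method declarations, and comment
+// out the trailing @end.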
+void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ std::string ResultStr;
+ if (!ObjCForwardDecls.count(ClassDecl)) {
+ // we haven't seen a forward decl - generate a typedef.
+ ResultStr = "#ifndef _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "#define _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "typedef struct objc_object ";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += ";\n#endif\n";
+ // Mark this typedef as having been generated.
+ ObjCForwardDecls.insert(ClassDecl);
+ }
+ SynthesizeObjCInternalStruct(ClassDecl, ResultStr);
+
+ for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(*Context),
+ E = ClassDecl->prop_end(*Context); I != E; ++I)
+ RewriteProperty(*I);
+ for (ObjCInterfaceDecl::instmeth_iterator
+ I = ClassDecl->instmeth_begin(*Context),
+ E = ClassDecl->instmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = ClassDecl->classmeth_begin(*Context),
+ E = ClassDecl->classmeth_end(*Context);
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndLoc(), 0, "// ", 3);
+}
+
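+// RewritePropertySetter - Rewrite a property assignment such as "obj.prop = rhs"
+// into the equivalent setter message send, which SynthMessageExpr() then lowers
+// to a call roughly of the form
+// objc_msgSend((id)obj, sel_registerName("setProp:"), rhs).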
+Stmt *RewriteObjC::RewritePropertySetter(BinaryOperator *BinOp, Expr *newStmt,
+ SourceRange SrcRange) {
+ // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr.
+ // This allows us to reuse all the fun and games in SynthMessageExpr().
+ ObjCPropertyRefExpr *PropRefExpr = dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS());
+ ObjCMessageExpr *MsgExpr;
+ ObjCPropertyDecl *PDecl = PropRefExpr->getProperty();
+ llvm::SmallVector<Expr *, 1> ExprVec;
+ ExprVec.push_back(newStmt);
+
+ Stmt *Receiver = PropRefExpr->getBase();
+ ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(Receiver);
+ if (PRE && PropGetters[PRE]) {
+ // This allows us to handle chain/nested property getters.
+ Receiver = PropGetters[PRE];
+ }
+ MsgExpr = new (Context) ObjCMessageExpr(dyn_cast<Expr>(Receiver),
+ PDecl->getSetterName(), PDecl->getType(),
+ PDecl->getSetterMethodDecl(),
+ SourceLocation(), SourceLocation(),
+ &ExprVec[0], 1);
+ Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr);
+
+ // Now do the actual rewrite.
+ ReplaceStmtWithRange(BinOp, ReplacingStmt, SrcRange);
+ //delete BinOp;
+ // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
+ // to things that stay around.
+ Context->Deallocate(MsgExpr);
+ return ReplacingStmt;
+}
+
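+// RewritePropertyGetter - Rewrite a property access such as "obj.prop" into the
+// getter message send. For nested accesses (e.g. obj.p.i) the synthesized
+// expression is stashed in PropGetters and spliced in when the outermost access
+// is rewritten.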
+Stmt *RewriteObjC::RewritePropertyGetter(ObjCPropertyRefExpr *PropRefExpr) {
+ // Synthesize a ObjCMessageExpr from a ObjCPropertyRefExpr.
+ // This allows us to reuse all the fun and games in SynthMessageExpr().
+ ObjCMessageExpr *MsgExpr;
+ ObjCPropertyDecl *PDecl = PropRefExpr->getProperty();
+
+ Stmt *Receiver = PropRefExpr->getBase();
+
+ ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(Receiver);
+ if (PRE && PropGetters[PRE]) {
+ // This allows us to handle chain/nested property getters.
+ Receiver = PropGetters[PRE];
+ }
+ MsgExpr = new (Context) ObjCMessageExpr(dyn_cast<Expr>(Receiver),
+ PDecl->getGetterName(), PDecl->getType(),
+ PDecl->getGetterMethodDecl(),
+ SourceLocation(), SourceLocation(),
+ 0, 0);
+
+ Stmt *ReplacingStmt = SynthMessageExpr(MsgExpr);
+
+ if (!PropParentMap)
+ PropParentMap = new ParentMap(CurrentBody);
+
+ Stmt *Parent = PropParentMap->getParent(PropRefExpr);
+ if (Parent && isa<ObjCPropertyRefExpr>(Parent)) {
+ // We stash away the ReplacingStmt since actually doing the
+ // replacement/rewrite won't work for nested getters (e.g. obj.p.i)
+ PropGetters[PropRefExpr] = ReplacingStmt;
+ // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
+ // to things that stay around.
+ Context->Deallocate(MsgExpr);
+ return PropRefExpr; // return the original...
+ } else {
+ ReplaceStmt(PropRefExpr, ReplacingStmt);
+ // delete PropRefExpr; elsewhere...
+ // NOTE: We don't want to call MsgExpr->Destroy(), as it holds references
+ // to things that stay around.
+ Context->Deallocate(MsgExpr);
+ return ReplacingStmt;
+ }
+}
+
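+// RewriteObjCIvarRefExpr - Rewrite an instance variable reference by casting its
+// base to the synthesized "<ClassName>_IMPL" struct; e.g. "obj->ivar" becomes
+// "((Foo_IMPL *)obj)->ivar" (Foo is illustrative).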
+Stmt *RewriteObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV,
+ SourceLocation OrigStart) {
+ ObjCIvarDecl *D = IV->getDecl();
+ if (CurMethodDef) {
+ if (const PointerType *pType = IV->getBase()->getType()->getAsPointerType()) {
+ ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(pType->getPointeeType());
+ // look up which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(*Context,
+ D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str());
+ RecordDecl *RD = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(), II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = new (Context) CStyleCastExpr(castT, IV->getBase(),
+ castT,SourceLocation(),
+ SourceLocation());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
+ IV->getBase()->getLocEnd(),
+ castExpr);
+ if (IV->isFreeIvar() &&
+ CurMethodDef->getClassInterface() == iFaceDecl->getDecl()) {
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, D,
+ IV->getLocation(),
+ D->getType());
+ ReplaceStmt(IV, ME);
+ // delete IV; leak for now, see RewritePropertySetter() usage for more info.
+ return ME;
+ }
+
+ ReplaceStmt(IV->getBase(), PE);
+ // Cannot delete IV->getBase(), since PE points to it.
+ // Replace the old base with the cast. This is important when doing
+ // embedded rewrites. For example, [newInv->_container addObject:0].
+ IV->setBase(PE);
+ return IV;
+ }
+ } else { // we are outside a method.
+ assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
+
+ // Explicit ivar refs need to have a cast inserted.
+ // FIXME: consider sharing some of this code with the code above.
+ if (const PointerType *pType = IV->getBase()->getType()->getAsPointerType()) {
+ ObjCInterfaceType *iFaceDecl = dyn_cast<ObjCInterfaceType>(pType->getPointeeType());
+ // look up which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(*Context,
+ D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str());
+ RecordDecl *RD = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(), II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = new (Context) CStyleCastExpr(castT, IV->getBase(),
+ castT, SourceLocation(),
+ SourceLocation());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
+ IV->getBase()->getLocEnd(), castExpr);
+ ReplaceStmt(IV->getBase(), PE);
+ // Cannot delete IV->getBase(), since PE points to it.
+ // Replace the old base with the cast. This is important when doing
+ // embedded rewrites. For example, [newInv->_container addObject:0].
+ IV->setBase(PE);
+ return IV;
+ }
+ }
+ return IV;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((unsigned int (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)items, (unsigned int)16)
+///
+void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, unsigned int))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)items, (unsigned int)16)";
+}
+
+/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
+/// statement to exit to its outer synthesized loop.
+///
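+/// For example, "break;" is rewritten to "goto __break_label_<N>;", where <N>
+/// is the label number of the enclosing synthesized loop.
+///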
+Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf.c_str(), buf.size());
+
+ return 0;
+}
+
+/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
+/// statement to continue with its inner synthesized loop.
+///
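+/// For example, "continue;" is rewritten to "goto __continue_label_<N>;", where
+/// <N> is the label number of the enclosing synthesized loop.
+///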
+Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf.c_str(), buf.size());
+
+ return 0;
+}
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id items[16];
+/// id l_collection = (id)collection;
+/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:items count:16]);
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ buf = "\n{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ elementTypeAsString = ElementType.getAsString();
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getNameAsCString();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getNameAsCString();
+ elementTypeAsString
+ = cast<ValueDecl>(DR->getDecl())->getType().getAsString();
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id items[16];
+ buf += "id items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf,
+ buf.c_str(), buf.size());
+ // Replace ')' in for '(' type elem in collection ')' with ';'
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation lparenLoc = startLoc.getFileLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:items count:16];
+ // which is synthesized into:
+ // unsigned int limit =
+ // ((unsigned int (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // (struct __objcFastEnumerationState *)&state,
+ // (id *)items, (unsigned int)16);
+ buf += "unsigned long limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace ')' in for '(' type elem in collection ')' with all of these.
+ ReplaceText(lparenLoc, 1, buf.c_str(), buf.size());
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:items count:16]);
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while (limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += ");\n\t";
+ buf += elementName;
+ buf += " = ((id)0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((id)0);\n";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getFileLocWithOffset(1);
+ InsertText(endBodyLoc, buf.c_str(), buf.size());
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getFileLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf.c_str(), buf.size());
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return 0;
+}
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
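+/// The @try/@finally pair is emitted directly in its expanded, setjmp-based
+/// form (objc_exception_try_enter/_setjmp/objc_exception_extract), matching the
+/// scheme used by RewriteObjCTryStmt below.
+///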
+Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ buf = "objc_sync_enter((id)";
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf.c_str(), buf.size());
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+ // been rewritten! (which implies the SourceLocations are invalid).
+ SourceLocation endLoc = S->getSynchBody()->getLocStart();
+ const char *endBuf = SM->getCharacterData(endLoc);
+ while (*endBuf != ')') endBuf--;
+ SourceLocation rparenLoc = startLoc.getFileLocWithOffset(endBuf-startBuf);
+ buf = ");\n";
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+ ReplaceText(rparenLoc, 1, buf.c_str(), buf.size());
+ startLoc = S->getSynchBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @synchronized block");
+ SourceLocation lastCurlyLoc = startLoc;
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}\n";
+ buf += "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+ buf += " objc_sync_exit(";
+ Expr *syncExpr = new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ S->getSynchExpr(),
+ Context->getObjCIdType(),
+ SourceLocation(),
+ SourceLocation());
+ std::string syncExprBufS;
+ llvm::raw_string_ostream syncExprBuf(syncExprBufS);
+ syncExpr->printPretty(syncExprBuf, *Context);
+ buf += syncExprBuf.str();
+ buf += ");\n";
+ buf += " if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}\n";
+ buf += "}";
+
+ ReplaceText(lastCurlyLoc, 1, buf.c_str(), buf.size());
+ return 0;
+}
+
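+// WarnAboutReturnGotoContinueOrBreakStmts - Recursively walk S and emit a
+// diagnostic for any return/goto/continue/break statement, since such jumps
+// would escape the synthesized setjmp-based @try scope without running the
+// @finally cleanup.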
+void RewriteObjC::WarnAboutReturnGotoContinueOrBreakStmts(Stmt *S) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI)
+ WarnAboutReturnGotoContinueOrBreakStmts(*CI);
+
+ if (isa<ReturnStmt>(S) || isa<ContinueStmt>(S) ||
+ isa<BreakStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
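+// RewriteObjCTryStmt - Rewrite @try/@catch/@finally into a setjmp-based scheme:
+// the @try body runs under "if (!_setjmp(_stack.buf))" after
+// objc_exception_try_enter(&_stack), the @catch clauses become an if/else-if
+// chain keyed on objc_exception_match(), and the @finally clause (explicit or
+// implicit) re-throws via objc_exception_throw() when needed.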
+Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+
+ std::string buf;
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf = "/* @try scope begin */ { struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+
+ ReplaceText(startLoc, 4, buf.c_str(), buf.size());
+
+ startLoc = S->getTryBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @try block");
+
+ SourceLocation lastCurlyLoc = startLoc;
+ ObjCAtCatchStmt *catchList = S->getCatchStmts();
+ if (catchList) {
+ startLoc = startLoc.getFileLocWithOffset(1);
+ buf = " /* @catch begin */ else {\n";
+ buf += " id _caught = objc_exception_extract(&_stack);\n";
+ buf += " objc_exception_try_enter (&_stack);\n";
+ buf += " if (_setjmp(_stack.buf))\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += " else { /* @catch continue */";
+
+ InsertText(startLoc, buf.c_str(), buf.size());
+ } else { /* no catch list */
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf.c_str(), buf.size());
+ }
+ bool sawIdTypedCatch = false;
+ Stmt *lastCatchBody = 0;
+ while (catchList) {
+ ParmVarDecl *catchDecl = catchList->getCatchParamDecl();
+
+ if (catchList == S->getCatchStmts())
+ buf = "if ("; // we are generating code for the first catch clause
+ else
+ buf = "else if (";
+ startLoc = catchList->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @catch location");
+
+ const char *lParenLoc = strchr(startBuf, '(');
+
+ if (catchList->hasEllipsis()) {
+ // Now rewrite the body...
+ lastCatchBody = catchList->getCatchBody();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ assert(*SM->getCharacterData(catchList->getRParenLoc()) == ')' &&
+ "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ buf += "1) { id _tmp = _caught;";
+ Rewrite.ReplaceText(startLoc, bodyBuf-startBuf+1,
+ buf.c_str(), buf.size());
+ } else if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (t == Context->getObjCIdType()) {
+ buf += "1) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf.c_str(), buf.size());
+ sawIdTypedCatch = true;
+ } else if (const PointerType *pType = t->getAsPointerType()) {
+ ObjCInterfaceType *cls; // Should be a pointer to a class.
+
+ cls = dyn_cast<ObjCInterfaceType>(pType->getPointeeType().getTypePtr());
+ if (cls) {
+ buf += "objc_exception_match((struct objc_class *)objc_getClass(\"";
+ buf += cls->getDecl()->getNameAsString();
+ buf += "\"), (struct objc_object *)_caught)) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf.c_str(), buf.size());
+ }
+ }
+ // Now rewrite the body...
+ lastCatchBody = catchList->getCatchBody();
+ SourceLocation rParenLoc = catchList->getRParenLoc();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+ assert((*rParenBuf == ')') && "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ buf = " = _caught;";
+ // Here we replace ") {" with "= _caught;" (which initializes and
+ // declares the @catch parameter).
+ ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, buf.c_str(), buf.size());
+ } else {
+ assert(false && "@catch rewrite bug");
+ }
+ // make sure all the catch bodies get rewritten!
+ catchList = catchList->getNextCatchStmt();
+ }
+ // Complete the catch list...
+ if (lastCatchBody) {
+ SourceLocation bodyLoc = lastCatchBody->getLocEnd();
+ assert(*SM->getCharacterData(bodyLoc) == '}' &&
+ "bogus @catch body location");
+
+ // Insert the last (implicit) else clause *before* the right curly brace.
+ bodyLoc = bodyLoc.getFileLocWithOffset(-1);
+ buf = "} /* last catch end */\n";
+ buf += "else {\n";
+ buf += " _rethrow = _caught;\n";
+ buf += " objc_exception_try_exit(&_stack);\n";
+ buf += "} } /* @catch end */\n";
+ if (!S->getFinallyStmt())
+ buf += "}\n";
+ InsertText(bodyLoc, buf.c_str(), buf.size());
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = lastCatchBody->getLocEnd();
+ }
+ if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) {
+ startLoc = finalStmt->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @finally start");
+
+ buf = "/* @finally */";
+ ReplaceText(startLoc, 8, buf.c_str(), buf.size());
+
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startLoc = body->getLocStart();
+ SourceLocation endLoc = body->getLocEnd();
+ assert(*SM->getCharacterData(startLoc) == '{' &&
+ "bogus @finally body location");
+ assert(*SM->getCharacterData(endLoc) == '}' &&
+ "bogus @finally body location");
+
+ startLoc = startLoc.getFileLocWithOffset(1);
+ buf = " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+ InsertText(startLoc, buf.c_str(), buf.size());
+ endLoc = endLoc.getFileLocWithOffset(-1);
+ buf = " if (_rethrow) objc_exception_throw(_rethrow);\n";
+ InsertText(endLoc, buf.c_str(), buf.size());
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = body->getLocEnd();
+
+ // Now check for any return/goto/continue/break statements within the @try.
+ WarnAboutReturnGotoContinueOrBreakStmts(S->getTryBody());
+ } else { /* no finally clause - make sure we synthesize an implicit one */
+ buf = "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+ buf += " if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf.c_str(), buf.size());
+ }
+ // Now emit the final closing curly brace...
+ lastCurlyLoc = lastCurlyLoc.getFileLocWithOffset(1);
+ buf = " } /* @try scope end */\n";
+ InsertText(lastCurlyLoc, buf.c_str(), buf.size());
+ return 0;
+}
+
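+// @catch and @finally are rewritten as part of RewriteObjCTryStmt above, so
+// there is nothing left to do when these nodes are visited individually.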
+Stmt *RewriteObjC::RewriteObjCCatchStmt(ObjCAtCatchStmt *S) {
+ return 0;
+}
+
+Stmt *RewriteObjC::RewriteObjCFinallyStmt(ObjCAtFinallyStmt *S) {
+ return 0;
+}
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that's already
+// been rewritten! (which implies the SourceLocations are invalid).
+Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else // add an implicit argument
+ buf = "objc_exception_throw(_caught";
+
+ // handle "@ throw" correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf.c_str(), buf.size());
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getFileLocWithOffset(semiBuf-startBuf);
+ buf = ");";
+ ReplaceText(semiLoc, 1, buf.c_str(), buf.size());
+ return 0;
+}
+
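+// RewriteAtEncode - Rewrite @encode(type) into the corresponding type-encoding
+// C string literal; e.g. @encode(int) becomes "i".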
+Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ QualType StrType = Context->getPointerType(Context->CharTy);
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = StringLiteral::Create(*Context,StrEncoding.c_str(),
+ StrEncoding.length(), false,StrType,
+ SourceLocation());
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return Replacement;
+}
+
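+// RewriteAtSelector - Rewrite @selector(name) into sel_registerName("name").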
+Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ llvm::SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString().c_str(),
+ Exp->getSelector().getAsString().size(),
+ false, argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size());
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return SelExp;
+}
+
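+// SynthesizeCallToFunctionDecl - Build a call to FD with the given arguments,
+// referencing FD through an implicit function-to-pointer cast.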
+CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
+ FunctionDecl *FD, Expr **args, unsigned nargs) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, msgSendType, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE = new (Context) ImplicitCastExpr(pToFunc, DRE,
+ /*isLvalue=*/false);
+
+ const FunctionType *FT = msgSendType->getAsFunctionType();
+
+ return new (Context) CallExpr(*Context, ICE, args, nargs, FT->getResultType(),
+ SourceLocation());
+}
+
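+// scanForProtocolRefs - Scan [startBuf, endBuf) for a protocol qualifier list
+// "<...>"; on success, point startRef/endRef at the '<' and '>' and return true.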
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
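+// scanToNextArgument - Advance argRef to the ',' or ')' that ends the current
+// parameter, skipping over any nested "<...>" protocol lists.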
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
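+// needToScanForQualifiers - Return true if T is a protocol-qualified type
+// (e.g. "id<P>" or "Class <Protocol> *") whose qualifiers must be commented out
+// in the generated C.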
+bool RewriteObjC::needToScanForQualifiers(QualType T) {
+
+ if (T->isObjCQualifiedIdType())
+ return true;
+
+ if (const PointerType *pType = T->getAsPointerType()) {
+ Type *pointeeType = pType->getPointeeType().getTypePtr();
+ if (isa<ObjCQualifiedInterfaceType>(pointeeType))
+ return true; // we have "Class <Protocol> *".
+ }
+ return false;
+}
+
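+// RewriteObjCQualifiedInterfaceTypes - Comment out protocol qualifiers in the
+// source text of E's type; e.g. a cast "(id<Foo>)x" becomes "(id/*<Foo>*/)x".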
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getFileLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getFileLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*", 2);
+ InsertText(GreaterLoc, "*/", 2);
+ }
+ }
+}
+
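+// This overload performs the same rewrite for a declaration: the return type
+// and each parameter type of a function (or the type of a variable) are scanned
+// for "<...>" and the protocol references are commented out.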
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAsFunctionType();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getResultType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getFileLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getFileLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*", 2);
+ InsertText(GreaterLoc, "*/", 2);
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumArgs(); i++) {
+ if (needToScanForQualifiers(proto->getArgType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getFileLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getFileLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*", 2);
+ InsertText(GreaterLoc, "*/", 2);
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(
+ Context->CharTy.getQualifiedType(QualType::Const)));
+ QualType getFuncType = Context->getFunctionType(Context->getObjCSelType(),
+ &ArgTys[0], ArgTys.size(),
+ false /*isVariadic*/, 0);
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SelGetUidIdent, getFuncType,
+ FunctionDecl::Extern, false);
+}
+
+void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ strcmp(FD->getNameAsCString(), "sel_registerName") == 0) {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
+// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super);
+void RewriteObjC::SynthSuperContructorFunctionDecl() {
+ if (SuperContructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ false, 0);
+ SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/, 0);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/, 0);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/, 0);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/, 0);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = Context->getFunctionType(Context->DoubleTy,
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/, 0);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ msgSendIdent, msgSendType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+void RewriteObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(
+ Context->CharTy.getQualifiedType(QualType::Const)));
+ QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ false /*isVariadic*/, 0);
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ getClassIdent, getClassType,
+ FunctionDecl::Extern, false);
+}
+
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+void RewriteObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ llvm::SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(
+ Context->CharTy.getQualifiedType(QualType::Const)));
+ QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ false /*isVariadic*/, 0);
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ getClassIdent, getClassType,
+ FunctionDecl::Extern, false);
+}
+
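+// RewriteObjCStringLiteral - Rewrite @"..." into a file-static
+// __NSConstantStringImpl definition (emitted into the preamble) and replace the
+// literal with the address of that variable, cast back to the expression's
+// original type.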
+Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // replace any non-alphanumeric characters with '_'.
+ if (!isalpha(c) && (c < '0' || c > '9'))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, *Context);
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ // The minus 2 removes the begin/end double quotes.
+ Preamble += utostr(prettyBuf.str().size()-2) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ &Context->Idents.get(S.c_str()), strType,
+ VarDecl::Static);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, strType, SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UnaryOperator::AddrOf,
+ Context->getPointerType(DRE->getType()),
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = new (Context) CStyleCastExpr(Exp->getType(), Unop,
+ Exp->getType(), SourceLocation(), SourceLocation());
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return cast;
+}
+
+ObjCInterfaceDecl *RewriteObjC::isSuperReceiver(Expr *recExpr) {
+ // check if we are sending a message to 'super'
+ if (!CurMethodDef || !CurMethodDef->isInstanceMethod()) return 0;
+
+ if (ObjCSuperExpr *Super = dyn_cast<ObjCSuperExpr>(recExpr)) {
+ const PointerType *PT = Super->getType()->getAsPointerType();
+ assert(PT);
+ ObjCInterfaceType *IT = cast<ObjCInterfaceType>(PT->getPointeeType());
+ return IT->getDecl();
+ }
+ return 0;
+}
+
+// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
+QualType RewriteObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_class *super;
+ FieldTypes[1] = Context->getObjCClassType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(*Context,
+ FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(), 0,
+ FieldTypes[i], /*BitWidth=*/0,
+ /*Mutable=*/false));
+ }
+
+ SuperStructDecl->completeDefinition(*Context);
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
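+// struct __NSConstantStringImpl { id isa; int flags; char *str; long length; };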
+QualType RewriteObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // id isa (the constant string class reference);
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(*Context,
+ FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(), 0,
+ FieldTypes[i],
+ /*BitWidth=*/0,
+ /*Mutable=*/true));
+ }
+
+ ConstantStringDecl->completeDefinition(*Context);
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
+
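+// SynthMessageExpr - Lower an Objective-C message send into a call through a
+// casted objc_msgSend variant, roughly:
+//   [receiver sel:arg]  ==>
+//   ((id (*)(id, SEL, ...))(void *)objc_msgSend)((id)receiver,
+//                                                sel_registerName("sel:"), arg)
+// selecting the _stret, _fpret, or Super flavor based on the return type and
+// the receiver.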
+Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = 0;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getResultType();
+ if (resultType->isStructureType() || resultType->isUnionType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ llvm::SmallVector<Expr*, 8> MsgExprs;
+ IdentifierInfo *clsName = Exp->getClassName();
+
+ // Derive/push the receiver/selector, 2 implicit arguments to objc_msgSend().
+ if (clsName) { // class message.
+ // FIXME: We need to fix Sema (and the AST for ObjCMessageExpr) to handle
+ // the 'super' idiom within a class method.
+ if (!strcmp(clsName->getName(), "super")) {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *SuperDecl =
+ CurMethodDef->getClassInterface()->getSuperClass();
+
+ llvm::SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ Context->getObjCIdType(),
+ SourceLocation()),
+ Context->getObjCIdType(),
+ SourceLocation(), SourceLocation())); // set the 'receiver'.
+
+ llvm::SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ SuperDecl->getIdentifier()->getName(),
+ SuperDecl->getIdentifier()->getLength(),
+ false, argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size());
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back( // set 'super class', using objc_getClass().
+ new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ Cls, Context->getObjCIdType(),
+ SourceLocation(), SourceLocation()));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.Microsoft) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ superType, SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UnaryOperator::AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ SourceLocation());
+ SuperRep = new (Context) CStyleCastExpr(Context->getPointerType(superType),
+ SuperRep, Context->getPointerType(superType),
+ SourceLocation(), SourceLocation());
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE = new (Context) InitListExpr(SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superType, ILE,
+ false);
+ // struct objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UnaryOperator::AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ } else {
+ llvm::SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ clsName->getLength(),
+ false, argType,
+ SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size());
+ MsgExprs.push_back(Cls);
+ }
+ } else { // instance message.
+ Expr *recExpr = Exp->getReceiver();
+
+ if (ObjCInterfaceDecl *SuperDecl = isSuperReceiver(recExpr)) {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ llvm::SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ Context->getObjCIdType(),
+ SourceLocation()),
+ Context->getObjCIdType(),
+ SourceLocation(), SourceLocation())); // set the 'receiver'.
+
+ llvm::SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ SuperDecl->getIdentifier()->getName(),
+ SuperDecl->getIdentifier()->getLength(),
+ false, argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size());
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back(
+ // set 'super class', using objc_getClass().
+ new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ Cls, Context->getObjCIdType(), SourceLocation(), SourceLocation()));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.Microsoft) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ superType, SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UnaryOperator::AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ SourceLocation());
+ SuperRep = new (Context) CStyleCastExpr(Context->getPointerType(superType),
+ SuperRep, Context->getPointerType(superType),
+ SourceLocation(), SourceLocation());
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE = new (Context) InitListExpr(SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superType, ILE, false);
+ }
+ MsgExprs.push_back(SuperRep);
+ } else {
+ // Remove all type-casts because they may contain objc-style types; e.g.
+ // Foo<Proto> *.
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ recExpr = new (Context) CStyleCastExpr(Context->getObjCIdType(), recExpr,
+ Context->getObjCIdType(),
+ SourceLocation(), SourceLocation());
+ MsgExprs.push_back(recExpr);
+ }
+ }
+ // Create a call to sel_registerName("selName"); it will be the 2nd argument.
+ llvm::SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString().c_str(),
+ Exp->getSelector().getAsString().size(),
+ false, argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size());
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : ICE->getType();
+ userExpr = new (Context) CStyleCastExpr(type, userExpr, type, SourceLocation(), SourceLocation());
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ userExpr = new (Context) CStyleCastExpr(Context->getObjCIdType(),
+ userExpr, Context->getObjCIdType(),
+ SourceLocation(), SourceLocation());
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertySetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ llvm::SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : (*PI)->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (isTopLevelBlockPointerType(t)) {
+ const BlockPointerType *BPT = t->getAsBlockPointerType();
+ t = Context->getPointerType(BPT->getPointeeType());
+ }
+ ArgTypes.push_back(t);
+ }
+ returnType = OMD->getResultType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType() : OMD->getResultType();
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, msgSendType,
+ SourceLocation());
+
+ // Need to cast objc_msgSend to "void *" (to work around a GCC band-aid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = new (Context) CStyleCastExpr(Context->getPointerType(Context->VoidTy), DRE,
+ Context->getPointerType(Context->VoidTy),
+ SourceLocation(), SourceLocation());
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType = Context->getFunctionType(returnType,
+ &ArgTypes[0], ArgTypes.size(),
+ // If we don't have a method decl, force a variadic cast.
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true, 0);
+ castType = Context->getPointerType(castType);
+ cast = new (Context) CStyleCastExpr(castType, cast, castType, SourceLocation(), SourceLocation());
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ const FunctionType *FT = msgSendType->getAsFunctionType();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), SourceLocation());
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+ // We have a method that returns a struct/union. We must also generate a
+ // call to objc_msgSend_stret and hang both varieties on a conditional
+ // expression that dictates which one to invoke, depending on the size of
+ // the method's return type.
+
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor, msgSendType,
+ SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ cast = new (Context) CStyleCastExpr(Context->getPointerType(Context->VoidTy), STDRE,
+ Context->getPointerType(Context->VoidTy),
+ SourceLocation(), SourceLocation());
+ // Now do the "normal" pointer to function cast.
+ castType = Context->getFunctionType(returnType,
+ &ArgTypes[0], ArgTypes.size(),
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false, 0);
+ castType = Context->getPointerType(castType);
+ cast = new (Context) CStyleCastExpr(castType, cast, castType, SourceLocation(), SourceLocation());
+
+ // Don't forget the parens to enforce the proper binding.
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ FT = msgSendType->getAsFunctionType();
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), SourceLocation());
+
+ // Build sizeof(returnType)
+ SizeOfAlignOfExpr *sizeofExpr = new (Context) SizeOfAlignOfExpr(true,
+ returnType,
+ Context->getSizeType(),
+ SourceLocation(), SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+    // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common
+    // cases. For x86 it is more complicated, and some kind of target-specific
+    // routine is needed to decide what to do.
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ IntegerLiteral *limit = new (Context) IntegerLiteral(llvm::APInt(IntSize, 8),
+ Context->IntTy,
+ SourceLocation());
+ BinaryOperator *lessThanExpr = new (Context) BinaryOperator(sizeofExpr, limit,
+ BinaryOperator::LE,
+ Context->IntTy,
+ SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(lessThanExpr, CE, STCE, returnType);
+ ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(), CondExpr);
+ }
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return ReplacingStmt;
+}
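+
+// Illustrative sketch only (not part of the rewriter): roughly the shape of
+// the call that the "funky cast" above produces for a simple message send.
+// The receiver, selector, and argument are hypothetical placeholders, and the
+// sel_registerName() form of the selector argument is an approximation.
+/*
+   [obj doSomething:42];   // becomes approximately:
+   ((id (*)(id, SEL, int))(void *)objc_msgSend)(
+       (id)obj, sel_registerName("doSomething:"), 42);
+*/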
+
+Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp);
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ Context->getObjCIdType());
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteObjC::HandleTranslationUnit().
+Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ ID, QualType()/*UNUSED*/, VarDecl::Extern);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, getProtocolType(), SourceLocation());
+ Expr *DerefExpr = new (Context) UnaryOperator(DRE, UnaryOperator::AddrOf,
+ Context->getPointerType(DRE->getType()),
+ SourceLocation());
+ CastExpr *castExpr = new (Context) CStyleCastExpr(DerefExpr->getType(), DerefExpr,
+ DerefExpr->getType(),
+ SourceLocation(), SourceLocation());
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol());
+ // delete Exp; leak for now, see RewritePropertySetter() usage for more info.
+ return castExpr;
+
+}
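+
+// Illustrative sketch only: the rough shape of the rewrite above for a
+// hypothetical protocol named Foo. _OBJC_PROTOCOL_Foo is the metadata
+// variable whose forward reference is emitted later from ProtocolExprDecls.
+/*
+   @protocol(Foo)   // becomes approximately:
+   (Protocol *)&_OBJC_PROTOCOL_Foo
+*/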
+
+bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// SynthesizeObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
+void RewriteObjC::SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getNameAsCString() &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(CDecl))
+ return;
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ int NumIvars = CDecl->ivar_size();
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+  // If this class has no ivars (or is only a forward declaration) and none of
+  // its root classes, directly or indirectly, have ivars (and thus were not
+  // synthesized), then there is no need to synthesize this class.
+ if ((CDecl->isForwardDecl() || NumIvars == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result.c_str(), Result.size());
+ return;
+ }
+
+  // FIXME: This has the potential to cause problems if
+  // SynthesizeObjCInternalStruct is ever called recursively.
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.Microsoft)
+ Result += "_IMPL";
+
+ if (NumIvars > 0) {
+ const char *cursor = strchr(startBuf, '{');
+ assert((cursor && endBuf)
+ && "SynthesizeObjCInternalStruct - malformed @interface");
+    // If the buffer contains preprocessor directives, we do more fine-grained
+    // rewrites. This is intended to handle code like the following (which
+    // occurs in NSURL.h, for example):
+ //
+ // #ifdef XYZ
+ // @interface Foo : NSObject
+ // #else
+ // @interface FooBar : NSObject
+ // #endif
+ // {
+ // int i;
+ // }
+ // @end
+ //
+ // This clause is segregated to avoid breaking the common case.
+ if (BufferContainsPPDirectives(startBuf, cursor)) {
+ SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() :
+ CDecl->getClassLoc();
+ const char *endHeader = SM->getCharacterData(L);
+ endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts);
+
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ // advance to the end of the referenced protocols.
+ while (endHeader < cursor && *endHeader != '>') endHeader++;
+ endHeader++;
+ }
+ // rewrite the original header
+ ReplaceText(LocStart, endHeader-startBuf, Result.c_str(), Result.size());
+ } else {
+ // rewrite the original header *without* disturbing the '{'
+ ReplaceText(LocStart, cursor-startBuf-1, Result.c_str(), Result.size());
+ }
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result = "\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+
+ // insert the super class structure definition.
+ SourceLocation OnePastCurly =
+ LocStart.getFileLocWithOffset(cursor-startBuf+1);
+ InsertText(OnePastCurly, Result.c_str(), Result.size());
+ }
+ cursor++; // past '{'
+
+ // Now comment out any visibility specifiers.
+ while (cursor < endBuf) {
+ if (*cursor == '@') {
+ SourceLocation atLoc = LocStart.getFileLocWithOffset(cursor-startBuf);
+ // Skip whitespace.
+ for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor)
+ /*scan*/;
+
+        // FIXME: The presence of @public, etc. inside a comment results in
+        // this transformation as well, which still produces correct C code.
+ if (!strncmp(cursor, "public", strlen("public")) ||
+ !strncmp(cursor, "private", strlen("private")) ||
+ !strncmp(cursor, "package", strlen("package")) ||
+ !strncmp(cursor, "protected", strlen("protected")))
+ InsertText(atLoc, "// ", 3);
+ }
+ // FIXME: If there are cases where '<' is used in ivar declaration part
+ // of user code, then scan the ivar list and use needToScanForQualifiers
+ // for type checking.
+ else if (*cursor == '<') {
+ SourceLocation atLoc = LocStart.getFileLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, "/* ", 3);
+ cursor = strchr(cursor, '>');
+ cursor++;
+ atLoc = LocStart.getFileLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, " */", 3);
+ } else if (*cursor == '^') { // rewrite block specifier.
+ SourceLocation caretLoc = LocStart.getFileLocWithOffset(cursor-startBuf);
+ ReplaceText(caretLoc, 1, "*", 1);
+ }
+ cursor++;
+ }
+ // Don't forget to add a ';'!!
+ InsertText(LocEnd.getFileLocWithOffset(1), ";", 1);
+ } else { // we don't have any instance variables - insert super struct.
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ Result += " {\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n};\n";
+ ReplaceText(LocStart, endBuf-startBuf, Result.c_str(), Result.size());
+ }
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl))
+ assert(false && "struct already synthesize- SynthesizeObjCInternalStruct");
+}
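+
+// Illustrative sketch only: roughly the struct synthesized above for a
+// hypothetical class Foo with one ivar, assuming -fms-extensions
+// (LangOpts.Microsoft) so the _IMPL suffix is appended, and assuming the
+// superclass NSObject was itself synthesized.
+/*
+   struct Foo_IMPL {
+     struct NSObject_IMPL NSObject_IVARS;
+     int i;
+   };
+*/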
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ const char *prefix,
+ const char *ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ static bool objc_impl_method = false;
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
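+
+// Illustrative sketch only: the rough shape of the method-list metadata
+// emitted above for a hypothetical class Foo with two instance methods.
+// Selector names, type encodings, and the _I_Foo_* function names are
+// placeholders.
+/*
+   static struct {
+     struct _objc_method_list *next_method;
+     int method_count;
+     struct _objc_method method_list[2];
+   } _OBJC_INSTANCE_METHODS_Foo
+       __attribute__ ((used, section ("__OBJC, __inst_meth"))) = {
+     0, 2
+     ,{{(SEL)"init", "@8@0:4", (void *)_I_Foo_init}
+       ,{(SEL)"dealloc", "v8@0:4", (void *)_I_Foo_dealloc}
+     }
+   };
+*/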
+
+/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
+void RewriteObjC::
+RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl, const char *prefix,
+ const char *ClassName, std::string &Result) {
+ static bool objc_protocol_methods = false;
+
+ // Output struct protocol_methods holder of method selector and type.
+ if (!objc_protocol_methods && !PDecl->isForwardDecl()) {
+ /* struct protocol_methods {
+ SEL _cmd;
+ char *method_types;
+ }
+ */
+ Result += "\nstruct _protocol_methods {\n";
+ Result += "\tstruct objc_selector *_cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "};\n";
+
+ objc_protocol_methods = true;
+ }
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl))
+ return;
+
+ if (PDecl->instmeth_begin(*Context) != PDecl->instmeth_end(*Context)) {
+ unsigned NumMethods = std::distance(PDecl->instmeth_begin(*Context),
+ PDecl->instmeth_end(*Context));
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
+ "{\n\t" + utostr(NumMethods) + "\n";
+
+ // Output instance methods declared in this protocol.
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(*Context),
+ E = PDecl->instmeth_end(*Context);
+ I != E; ++I) {
+ if (I == PDecl->instmeth_begin(*Context))
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output class methods declared in this protocol.
+ unsigned NumMethods = std::distance(PDecl->classmeth_begin(*Context),
+ PDecl->classmeth_end(*Context));
+ if (NumMethods > 0) {
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t";
+ Result += utostr(NumMethods);
+ Result += "\n";
+
+    // Output class methods declared in this protocol.
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(*Context),
+ E = PDecl->classmeth_end(*Context);
+ I != E; ++I) {
+ if (I == PDecl->classmeth_begin(*Context))
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output:
+ /* struct _objc_protocol {
+ // Objective-C 1.0 extensions
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol **protocol_list;
+ struct _objc_protocol_method_list *instance_methods;
+ struct _objc_protocol_method_list *class_methods;
+ };
+ */
+ static bool objc_protocol = false;
+ if (!objc_protocol) {
+ Result += "\nstruct _objc_protocol {\n";
+ Result += "\tstruct _objc_protocol_extension *isa;\n";
+ Result += "\tchar *protocol_name;\n";
+ Result += "\tstruct _objc_protocol **protocol_list;\n";
+ Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
+ Result += "};\n";
+
+ objc_protocol = true;
+ }
+
+ Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
+ "{\n\t0, \"";
+ Result += PDecl->getNameAsString();
+ Result += "\", 0, ";
+ if (PDecl->instmeth_begin(*Context) != PDecl->instmeth_end(*Context)) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += ", ";
+ }
+ else
+ Result += "0, ";
+ if (PDecl->classmeth_begin(*Context) != PDecl->classmeth_end(*Context)) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += "0\n";
+ Result += "};\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl))
+ assert(false && "protocol already synthesized");
+
+}
+
+void RewriteObjC::
+RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Protocols,
+ const char *prefix, const char *ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
+
+  // Output the top level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
+
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl;
+ for (CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->getIdentifier() == IDecl->getIdentifier())
+ break;
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += '_';
+ FullCategoryName += IDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ llvm::SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(*Context),
+ IDecl->instmeth_end(*Context));
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(*Context),
+ PropEnd = IDecl->propimpl_end(*Context);
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(*Context),
+ IDecl->classmeth_end(*Context),
+ false, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Protocols referenced in class declaration?
+  // A null CDecl corresponds to a category implementation with no category interface.
+ if (CDecl)
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
+ FullCategoryName.c_str(), Result);
+ /* struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions
+ uint32_t size; // sizeof (struct _objc_category)
+ struct _objc_property_list *instance_properties; // category's own
+ // @property decl.
+ };
+ */
+
+ static bool objc_category = false;
+ if (!objc_category) {
+ Result += "\nstruct _objc_category {\n";
+ Result += "\tchar *category_name;\n";
+ Result += "\tchar *class_name;\n";
+ Result += "\tstruct _objc_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_method_list *class_methods;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "\tstruct _objc_property_list *instance_properties;\n";
+ Result += "};\n";
+ objc_category = true;
+ }
+ Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
+ Result += FullCategoryName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
+ Result += IDecl->getNameAsString();
+ Result += "\"\n\t, \"";
+ Result += ClassDecl->getNameAsString();
+ Result += "\"\n";
+
+ if (IDecl->instmeth_begin(*Context) != IDecl->instmeth_end(*Context)) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_INSTANCE_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ if (IDecl->classmeth_begin(*Context) != IDecl->classmeth_end(*Context)) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_CLASS_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+
+ if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ Result += "\t, sizeof(struct _objc_category), 0\n};\n";
+}
+
+/// SynthesizeIvarOffsetComputation - This routine synthesizes the computation
+/// of an ivar's offset.
+void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCImplementationDecl *IDecl,
+ ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += IDecl->getNameAsString();
+ if (LangOpts.Microsoft)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
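+
+// Illustrative sketch only: for a hypothetical non-bitfield ivar 'i' of class
+// Foo, the text emitted above expands through the __OFFSETOFIVAR__ macro
+// (defined in SynthesizeMetaDataIntoBuffer) to an offsetof-style expression.
+/*
+   __OFFSETOFIVAR__(struct Foo, i)   // i.e. ((int) &((struct Foo *)0)->i)
+*/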
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+  // Explicitly declared @interface's are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl()) {
+    // FIXME: Implementation of a class with no @interface (legacy) does not
+    // produce correct synthesis as yet.
+ SynthesizeObjCInternalStruct(CDecl, Result);
+ }
+
+ // Build _objc_ivar_list metadata for classes ivars if needed
+ unsigned NumIvars = !IDecl->ivar_empty(*Context)
+ ? IDecl->ivar_size(*Context)
+ : (CDecl ? CDecl->ivar_size() : 0);
+ if (NumIvars > 0) {
+ static bool objc_ivar = false;
+ if (!objc_ivar) {
+ /* struct _objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+ */
+ Result += "\nstruct _objc_ivar {\n";
+ Result += "\tchar *ivar_name;\n";
+ Result += "\tchar *ivar_type;\n";
+ Result += "\tint ivar_offset;\n";
+ Result += "};\n";
+
+ objc_ivar = true;
+ }
+
+ /* struct {
+ int ivar_count;
+ struct _objc_ivar ivar_list[nIvars];
+ };
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint ivar_count;\n";
+ Result += "\tstruct _objc_ivar ivar_list[";
+ Result += utostr(NumIvars);
+ Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
+ Result += IDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
+ "{\n\t";
+ Result += utostr(NumIvars);
+ Result += "\n";
+
+ ObjCInterfaceDecl::ivar_iterator IVI, IVE;
+ llvm::SmallVector<ObjCIvarDecl *, 8> IVars;
+ if (!IDecl->ivar_empty(*Context)) {
+ for (ObjCImplementationDecl::ivar_iterator
+ IV = IDecl->ivar_begin(*Context),
+ IVEnd = IDecl->ivar_end(*Context);
+ IV != IVEnd; ++IV)
+ IVars.push_back(*IV);
+ IVI = IVars.begin();
+ IVE = IVars.end();
+ } else {
+ IVI = CDecl->ivar_begin();
+ IVE = CDecl->ivar_end();
+ }
+ Result += "\t,{{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ SynthesizeIvarOffsetComputation(IDecl, *IVI, Result);
+ Result += "}\n";
+ for (++IVI; IVI != IVE; ++IVI) {
+ Result += "\t ,{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ SynthesizeIvarOffsetComputation(IDecl, (*IVI), Result);
+ Result += "}\n";
+ }
+
+ Result += "\t }\n};\n";
+ }
+
+ // Build _objc_method_list for class's instance methods if needed
+ llvm::SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(*Context),
+ IDecl->instmeth_end(*Context));
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(*Context),
+ PropEnd = IDecl->propimpl_end(*Context);
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "", IDecl->getNameAsCString(), Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(*Context),
+ IDecl->classmeth_end(*Context),
+ false, "", IDecl->getNameAsCString(), Result);
+
+ // Protocols referenced in class declaration?
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
+ "CLASS", CDecl->getNameAsCString(), Result);
+
+ // Declaration of class/meta-class metadata
+ /* struct _objc_class {
+ struct _objc_class *isa; // or const char *root_class_name when metadata
+ const char *super_class_name;
+ char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct objc_cache *cache;
+ struct objc_protocol_list *protocols;
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+ */
+ static bool objc_class = false;
+ if (!objc_class) {
+ Result += "\nstruct _objc_class {\n";
+ Result += "\tstruct _objc_class *isa;\n";
+ Result += "\tconst char *super_class_name;\n";
+ Result += "\tchar *name;\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong info;\n";
+ Result += "\tlong instance_size;\n";
+ Result += "\tstruct _objc_ivar_list *ivars;\n";
+ Result += "\tstruct _objc_method_list *methods;\n";
+ Result += "\tstruct objc_cache *cache;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tconst char *ivar_layout;\n";
+ Result += "\tstruct _objc_class_ext *ext;\n";
+ Result += "};\n";
+ objc_class = true;
+ }
+
+ // Meta-class metadata generation.
+ ObjCInterfaceDecl *RootClass = 0;
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ while (SuperClass) {
+ RootClass = SuperClass;
+ SuperClass = SuperClass->getSuperClass();
+ }
+ SuperClass = CDecl->getSuperClass();
+
+ Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
+ "{\n\t(struct _objc_class *)\"";
+ Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
+ Result += "\"";
+
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
+ // 'info' field is initialized to CLS_META(2) for metaclass
+ Result += ", 0,2, sizeof(struct _objc_class), 0";
+ if (IDecl->classmeth_begin(*Context) != IDecl->classmeth_end(*Context)) {
+ Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
+ Result += IDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += ", 0\n";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ",0,0\n";
+ }
+ else
+ Result += "\t,0,0,0,0\n";
+ Result += "};\n";
+
+ // class metadata generation.
+ Result += "\nstatic struct _objc_class _OBJC_CLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
+ "{\n\t&_OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // 'info' field is initialized to CLS_CLASS(1) for class
+ Result += ", 0,1";
+ if (!ObjCSynthesizedStructs.count(CDecl))
+ Result += ",0";
+ else {
+ // class has size. Must synthesize its size.
+ Result += ",sizeof(struct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.Microsoft)
+ Result += "_IMPL";
+ Result += ")";
+ }
+ if (NumIvars > 0) {
+ Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
+ Result += CDecl->getNameAsString();
+ Result += "\n\t";
+ }
+ else
+ Result += ",0";
+ if (IDecl->instmeth_begin(*Context) != IDecl->instmeth_end(*Context)) {
+ Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0\n\t";
+ }
+ else
+ Result += ",0,0";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0,0\n";
+ }
+ else
+ Result += ",0,0,0\n";
+ Result += "};\n";
+}
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+
+void RewriteObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteImplementationDecl(ClassImplementation[i]);
+
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteImplementationDecl(CategoryImplementation[i]);
+}
+
+void RewriteObjC::SynthesizeMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // This is needed for determining instance variable offsets.
+ Result += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((int) &((TYPE *)0)->MEMBER)\n";
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ // Write objc_symtab metadata
+ /*
+ struct _objc_symtab
+ {
+ long sel_ref_cnt;
+ SEL *refs;
+ short cls_def_cnt;
+ short cat_def_cnt;
+ void *defs[cls_def_cnt + cat_def_cnt];
+ };
+ */
+
+ Result += "\nstruct _objc_symtab {\n";
+ Result += "\tlong sel_ref_cnt;\n";
+ Result += "\tSEL *refs;\n";
+ Result += "\tshort cls_def_cnt;\n";
+ Result += "\tshort cat_def_cnt;\n";
+ Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
+ Result += "};\n\n";
+
+ Result += "static struct _objc_symtab "
+ "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
+ Result += "\t0, 0, " + utostr(ClsDefCount)
+ + ", " + utostr(CatDefCount) + "\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t,&_OBJC_CLASS_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t,&_OBJC_CATEGORY_";
+ Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ Result += "};\n\n";
+
+ // Write objc_module metadata
+
+ /*
+ struct _objc_module {
+ long version;
+ long size;
+ const char *name;
+ struct _objc_symtab *symtab;
+ }
+ */
+
+ Result += "\nstruct _objc_module {\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong size;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _objc_symtab *symtab;\n";
+ Result += "};\n\n";
+ Result += "static struct _objc_module "
+ "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
+ Result += "\t" + utostr(OBJC_ABI_VERSION) +
+ ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
+ Result += "};\n\n";
+
+ if (LangOpts.Microsoft) {
+ if (ProtocolExprDecls.size()) {
+ Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I) {
+ Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += ";\n";
+ }
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+ Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
+ Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
+ Result += "&_OBJC_MODULES;\n";
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+}
+
+std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ const char *funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getResultType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString() + " __" +
+ funcName + "_" + "block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().getAsStringInternal(ParamStr, Context->PrintingPolicy);
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ Context->getPointerType((*I)->getType()).getAsStringInternal(Name,
+ Context->PrintingPolicy);
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ S += "struct __block_impl *";
+ else
+ (*I)->getType().getAsStringInternal(Name, Context->PrintingPolicy);
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
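+
+// Illustrative sketch only: roughly the function synthesized above for a
+// hypothetical block inside main() that captures 'x' by copy and 'i' by ref
+// (block index 0). The body after the original '{' is spliced in verbatim.
+/*
+   static void __main_block_func_0(struct __main_block_impl_0 *__cself) {
+     int *i = __cself->i; // bound by ref
+     int x = __cself->x;  // bound by copy
+     (*i) = x + 1;
+   }
+*/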
+
+std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ const char *funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ S += "_Block_object_assign((void*)&dst->";
+ S += (*I)->getNameAsString();
+ S += ", (void*)src->";
+ S += (*I)->getNameAsString();
+ S += ", 3/*BLOCK_FIELD_IS_OBJECT*/);}";
+ }
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ S += "_Block_object_dispose((void*)src->";
+ S += (*I)->getNameAsString();
+ S += ", 3/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
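+
+// Illustrative sketch only: the copy/dispose helpers synthesized above for a
+// hypothetical block in main() that imports another block 'blk'. The constant
+// 3 is BLOCK_FIELD_IS_OBJECT.
+/*
+   static void __main_block_copy_0(struct __main_block_impl_0 *dst,
+                                   struct __main_block_impl_0 *src) {
+     _Block_object_assign((void*)&dst->blk, (void*)src->blk, 3);
+   }
+   static void __main_block_dispose_0(struct __main_block_impl_0 *src) {
+     _Block_object_dispose((void*)src->blk, 3);
+   }
+*/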
+
+std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ bool hasCopyDisposeHelpers) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+
+ if (hasCopyDisposeHelpers)
+ S += " void *copy;\n void *dispose;\n";
+
+ Constructor += "(void *fp";
+
+ if (hasCopyDisposeHelpers)
+ Constructor += ", void *copyHelp, void *disposeHelp";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ (*I)->getType().getAsStringInternal(FieldName, Context->PrintingPolicy);
+ (*I)->getType().getAsStringInternal(ArgName, Context->PrintingPolicy);
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ Context->getPointerType((*I)->getType()).getAsStringInternal(FieldName,
+ Context->PrintingPolicy);
+ Context->getPointerType((*I)->getType()).getAsStringInternal(ArgName,
+ Context->PrintingPolicy);
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Size = sizeof(";
+ Constructor += Tag + ");\n impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ if (hasCopyDisposeHelpers)
+ Constructor += " copy = copyHelp;\n dispose = disposeHelp;\n";
+
+ // Initialize all "by copy" arguments.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ Constructor += " ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + " = (struct __block_impl *)_";
+ else
+ Constructor += Name + " = _";
+ Constructor += Name + ";\n";
+ }
+ // Initialize all "by ref" arguments.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ Constructor += " ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + " = (struct __block_impl *)_";
+ else
+ Constructor += Name + " = _";
+ Constructor += Name + ";\n";
+ }
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Size = sizeof(";
+ Constructor += Tag + ");\n impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ if (hasCopyDisposeHelpers)
+ Constructor += " copy = copyHelp;\n dispose = disposeHelp;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
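+
+// Illustrative sketch only: the rough shape of the struct and constructor
+// synthesized above for a hypothetical block in main() (index 0) that
+// captures 'x' by copy and has no copy/dispose helpers.
+/*
+   struct __main_block_impl_0 {
+     struct __block_impl impl;
+     int x;
+     __main_block_impl_0(void *fp, int _x, int flags=0) {
+       impl.isa = &_NSConcreteStackBlock;
+       impl.Size = sizeof(__main_block_impl_0);
+       impl.Flags = flags;
+       impl.FuncPtr = fp;
+       x = _x;
+     }
+   };
+*/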
+
+void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ const char *FunName) {
+ // Insert closures that were part of the function.
+ for (unsigned i = 0; i < Blocks.size(); i++) {
+
+ CollectBlockDeclRefInfo(Blocks[i]);
+
+ std::string Tag = "__" + std::string(FunName) + "_block_impl_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], Tag,
+ ImportedBlockDecls.size() > 0);
+
+ InsertText(FunLocStart, CI.c_str(), CI.size());
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, Tag);
+
+ InsertText(FunLocStart, CF.c_str(), CF.size());
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, Tag);
+ InsertText(FunLocStart, HF.c_str(), HF.size());
+ }
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByCopyDecls.clear();
+ BlockCallExprs.clear();
+ ImportedBlockDecls.clear();
+ }
+ Blocks.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const char *FuncName = FD->getNameAsCString();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ // FIXME: This hack works around a bug in Rewrite.InsertText().
+ SourceLocation FunLocStart = MD->getLocStart().getFileLocWithOffset(-1);
+ std::string FuncName = MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = FuncName.find(":", loc)) != std::string::npos)
+ FuncName.replace(loc, 1, "_");
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName.c_str());
+}
+
+void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(*CI);
+ }
+ // Handle specific things.
+ if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(S))
+ // FIXME: Handle enums.
+ if (!isa<FunctionDecl>(CDRE->getDecl()))
+ BlockDeclRefs.push_back(CDRE);
+ return;
+}
+
+void RewriteObjC::GetBlockCallExprs(Stmt *S) {
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockCallExprs(CBE->getBody());
+ else
+ GetBlockCallExprs(*CI);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ BlockCallExprs[dyn_cast<BlockDeclRefExpr>(CE->getCallee())] = CE;
+ }
+ }
+ return;
+}
+
+Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp) {
+ // Navigate to relevant type information.
+ const char *closureName = 0;
+ const BlockPointerType *CPT = 0;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp->getCallee())) {
+ closureName = DRE->getDecl()->getNameAsCString();
+ CPT = DRE->getType()->getAsBlockPointerType();
+ } else if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(Exp->getCallee())) {
+ closureName = CDRE->getDecl()->getNameAsCString();
+ CPT = CDRE->getType()->getAsBlockPointerType();
+ } else if (MemberExpr *MExpr = dyn_cast<MemberExpr>(Exp->getCallee())) {
+ closureName = MExpr->getMemberDecl()->getNameAsCString();
+ CPT = MExpr->getType()->getAsBlockPointerType();
+ } else {
+ assert(1 && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAsFunctionType();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TagDecl::TK_struct, TUDecl,
+ SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ llvm::SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (isTopLevelBlockPointerType(t)) {
+ const BlockPointerType *BPT = t->getAsBlockPointerType();
+ t = Context->getPointerType(BPT->getPointeeType());
+ }
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType = Context->getFunctionType(Exp->getType(),
+ &ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0);
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = new (Context) CStyleCastExpr(PtrBlock, Exp->getCallee(),
+ PtrBlock, SourceLocation(),
+ SourceLocation());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+ //PE->dump();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ &Context->Idents.get("FuncPtr"), Context->VoidPtrTy,
+ /*BitWidth=*/0, /*Mutable=*/true);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType());
+
+ CastExpr *FunkCast = new (Context) CStyleCastExpr(PtrToFuncCastType, ME,
+ PtrToFuncCastType,
+ SourceLocation(),
+ SourceLocation());
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ llvm::SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
+ BlkExprs.size(),
+ Exp->getType(), SourceLocation());
+ return CE;
+}
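+
+// Illustrative sketch only: roughly how a block invocation is rewritten by
+// SynthesizeBlockCall above. 'myBlock' and its signature are hypothetical.
+/*
+   myBlock(42);   // becomes approximately:
+   ((void (*)(struct __block_impl *, int))
+       ((struct __block_impl *)myBlock)->FuncPtr)
+           ((struct __block_impl *)myBlock, 42);
+*/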
+
+void RewriteObjC::RewriteBlockCall(CallExpr *Exp) {
+ Stmt *BlockCall = SynthesizeBlockCall(Exp);
+ ReplaceStmt(Exp, BlockCall);
+}
+
+// We need to return the rewritten expression to handle cases where the
+// BlockDeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteObjC::RewriteBlockDeclRefExpr(BlockDeclRefExpr *BDRE) {
+ // FIXME: Add more elaborate code generation required by the ABI.
+ Expr *DerefExpr = new (Context) UnaryOperator(BDRE, UnaryOperator::Deref,
+ Context->getPointerType(BDRE->getType()),
+ SourceLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), DerefExpr);
+ ReplaceStmt(BDRE, PE);
+ return PE;
+}
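+
+// Illustrative sketch only: for the __block variable 'i' in the example
+// above, the reference inside the block body is rewritten to a dereference of
+// the by-ref pointer.
+/*
+   i = 77;      // original, inside the block body
+   (*i) = 77;   // after RewriteBlockDeclRefExpr
+*/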
+
+void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+  // Scan the text of the cast, rewriting any block caret ('^') to '*'.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getFileLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*", 1);
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getFileLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getFileLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*", 1);
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAsPointerType();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAsFunctionProtoType();
+ } else {
+ const BlockPointerType *BPT = QT->getAsBlockPointerType();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAsFunctionProtoType();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I)
+ if (isTopLevelBlockPointerType(*I))
+ return true;
+ }
+ return false;
+}
+
+void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefDecl *TDD = dyn_cast<TypedefDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ assert(0 && "RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ DeclLoc = DeclLoc.getFileLocWithOffset(startBuf-endBuf);
+ ReplaceText(DeclLoc, 1, "*", 1);
+ }
+ if (PointerTypeTakesAnyBlockArguments(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^') {
+ SourceLocation CaretLoc = DeclLoc.getFileLocWithOffset(argListBegin-startBuf);
+ ReplaceText(CaretLoc, 1, "*", 1);
+ }
+ argListBegin++;
+ }
+ }
+ return;
+}
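+
+// Illustrative sketch only: the caret-to-star rewrites performed above on a
+// hypothetical block-pointer declaration (both the declarator and its block
+// pointer argument are rewritten).
+/*
+   int (^blk)(int (^)(void));   // original declaration
+   int (*blk)(int (*)(void));   // after RewriteBlockPointerDecl
+*/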
+
+void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->isByRef())
+ BlockByCopyDecls.insert(BlockDeclRefs[i]->getDecl());
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->isByRef()) {
+ BlockByRefDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getType()->isBlockPointerType()) {
+ GetBlockCallExprs(BlockDeclRefs[i]);
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+ }
+}
+
+FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(const char *name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl,SourceLocation(),
+ ID, FType, FunctionDecl::Extern, false,
+ false);
+}
+
+Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp) {
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef) {
+ FuncName = CurMethodDef->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = FuncName.find(":", loc)) != std::string::npos)
+ FuncName.replace(loc, 1, "_");
+ } else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber;
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType FType = Context->getPointerType(QualType(Exp->getFunctionType(),0));
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+  // Simulate a constructor call...
+ FD = SynthBlockInitFunctionDecl(Tag.c_str());
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, FType, SourceLocation());
+
+ llvm::SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func.c_str());
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, FD->getType(),
+ SourceLocation());
+ CastExpr *castExpr = new (Context) CStyleCastExpr(Context->VoidPtrTy, Arg,
+ Context->VoidPtrTy, SourceLocation(),
+ SourceLocation());
+ InitExprs.push_back(castExpr);
+
+ if (ImportedBlockDecls.size()) {
+ std::string Buf = "__" + FuncName + "_block_copy_" + BlockNumber;
+ FD = SynthBlockInitFunctionDecl(Buf.c_str());
+ Arg = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ castExpr = new (Context) CStyleCastExpr(Context->VoidPtrTy, Arg,
+ Context->VoidPtrTy, SourceLocation(),
+ SourceLocation());
+ InitExprs.push_back(castExpr);
+
+ Buf = "__" + FuncName + "_block_dispose_" + BlockNumber;
+ FD = SynthBlockInitFunctionDecl(Buf.c_str());
+ Arg = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ castExpr = new (Context) CStyleCastExpr(Context->VoidPtrTy, Arg,
+ Context->VoidPtrTy, SourceLocation(),
+ SourceLocation());
+ InitExprs.push_back(castExpr);
+ }
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getNameAsCString());
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getNameAsCString());
+ Arg = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ Exp = new (Context) CStyleCastExpr(Context->VoidPtrTy, Arg,
+ Context->VoidPtrTy, SourceLocation(),
+ SourceLocation());
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getNameAsCString());
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ FD = SynthBlockInitFunctionDecl((*I)->getNameAsCString());
+ Exp = new (Context) DeclRefExpr(FD, FD->getType(), SourceLocation());
+ Exp = new (Context) UnaryOperator(Exp, UnaryOperator::AddrOf,
+ Context->getPointerType(Exp->getType()),
+ SourceLocation());
+ InitExprs.push_back(Exp);
+ }
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ FType, SourceLocation());
+ NewRep = new (Context) UnaryOperator(NewRep, UnaryOperator::AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ SourceLocation());
+ NewRep = new (Context) CStyleCastExpr(FType, NewRep, FType, SourceLocation(),
+ SourceLocation());
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByCopyDecls.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+// This is run as a first "pass" prior to RewriteFunctionBodyOrGlobalInitializer().
+// This allows the main rewrite loop to associate all ObjCPropertyRefExprs with
+// their respective BinaryOperator. Without this knowledge, we'd need to rewrite
+// the ObjCPropertyRefExpr twice (once as a getter, and later as a setter).
+// Since the rewriter isn't capable of rewriting rewritten code, it's important
+// we get this right.
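+//
+// Illustrative example (hypothetical code, not from this source): given
+//   self.delegate = newDelegate;
+// the '=' BinaryOperator is recorded in PropSetters keyed by the LHS
+// ObjCPropertyRefExpr, so the main loop can emit a single setter call rather
+// than a getter rewrite followed by a setter rewrite.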
+void RewriteObjC::CollectPropertySetters(Stmt *S) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI)
+ CollectPropertySetters(*CI);
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
+ if (BinOp->isAssignmentOp()) {
+ if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(BinOp->getLHS()))
+ PropSetters[PRE] = BinOp;
+ }
+ }
+}
+
+Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
+ CI != E; ++CI)
+ if (*CI) {
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(*CI);
+ if (newStmt)
+ *CI = newStmt;
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ // Rewrite the block body in place.
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewritenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE);
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S))
+ return RewriteObjCIvarRefExpr(IvarRefExpr, OrigStmtRange.getBegin());
+
+ if (ObjCPropertyRefExpr *PropRefExpr = dyn_cast<ObjCPropertyRefExpr>(S)) {
+ BinaryOperator *BinOp = PropSetters[PropRefExpr];
+ if (BinOp) {
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to rewrite the right hand side prior to rewriting the setter.
+ DisableReplaceStmt = true;
+ // Save the source range. Even if we disable the replacement, the
+ // rewritten node will have been inserted into the tree. If the synthesized
+ // node is at the 'end', the rewriter will fail. Consider this:
+ // self.errorHandler = handler ? handler :
+ // ^(NSURL *errorURL, NSError *error) { return (BOOL)1; };
+ SourceRange SrcRange = BinOp->getSourceRange();
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(BinOp->getRHS());
+ DisableReplaceStmt = false;
+ //
+ // Unlike the main iterator, we explicitly avoid changing 'BinOp'. If
+ // we changed the RHS of BinOp, the rewriter would fail (since it needs
+ // to see the original expression). Consider this example:
+ //
+ // Foo *obj1, *obj2;
+ //
+ // obj1.i = [obj2 rrrr];
+ //
+ // 'BinOp' for the previous expression looks like:
+ //
+ // (BinaryOperator 0x231ccf0 'int' '='
+ // (ObjCPropertyRefExpr 0x231cc70 'int' Kind=PropertyRef Property="i"
+ // (DeclRefExpr 0x231cc50 'Foo *' Var='obj1' 0x231cbb0))
+ // (ObjCMessageExpr 0x231ccb0 'int' selector=rrrr
+ // (DeclRefExpr 0x231cc90 'Foo *' Var='obj2' 0x231cbe0)))
+ //
+ // 'newStmt' represents the rewritten message expression. For example:
+ //
+ // (CallExpr 0x231d300 'id':'struct objc_object *'
+ // (ParenExpr 0x231d2e0 'int (*)(id, SEL)'
+ // (CStyleCastExpr 0x231d2c0 'int (*)(id, SEL)'
+ // (CStyleCastExpr 0x231d220 'void *'
+ // (DeclRefExpr 0x231d200 'id (id, SEL, ...)' FunctionDecl='objc_msgSend' 0x231cdc0))))
+ //
+ // Note that 'newStmt' is passed to RewritePropertySetter so that it
+ // can be used as the setter argument. ReplaceStmt() will still 'see'
+ // the original RHS (since we haven't altered BinOp).
+ //
+ // This implies the Rewrite* routines can no longer delete the original
+ // node. As a result, we now leak the original AST nodes.
+ //
+ return RewritePropertySetter(BinOp, dyn_cast<Expr>(newStmt), SrcRange);
+ } else {
+ return RewritePropertyGetter(PropRefExpr);
+ }
+ }
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString.c_str(), messString.size());
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ }
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+ // Handle blocks rewriting.
+ if (BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ if (BDRE->isByRef())
+ return RewriteBlockDeclRefExpr(BDRE);
+ }
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE);
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+#if 0
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(), ICE->getSubExpr(), SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf, *Context);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteObjC::HandleDeclInMainFile(Decl *D) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isOverloadedOperator())
+ return;
+
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = FD->getCompoundBody(*Context)) {
+ CurFunctionDef = FD;
+ CollectPropertySetters(Body);
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ }
+ return;
+ }
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (CompoundStmt *Body = MD->getBody()) {
+ CurMethodDef = MD;
+ CollectPropertySetters(Body);
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
+ }
+ }
+ if (ObjCImplementationDecl *CI = dyn_cast<ObjCImplementationDecl>(D))
+ ClassImplementation.push_back(CI);
+ else if (ObjCCategoryImplDecl *CI = dyn_cast<ObjCCategoryImplDecl>(D))
+ CategoryImplementation.push_back(CI);
+ else if (ObjCClassDecl *CD = dyn_cast<ObjCClassDecl>(D))
+ RewriteForwardClassDecl(CD);
+ else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CollectPropertySetters(VD->getInit());
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(),
+ VD->getNameAsCString());
+ GlobalVarDecl = 0;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ return;
+ }
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ return;
+ }
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ if (RD->isDefinition()) {
+ for (RecordDecl::field_iterator i = RD->field_begin(*Context),
+ e = RD->field_end(*Context); i != e; ++i) {
+ FieldDecl *FD = *i;
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ }
+ }
+ return;
+ }
+ // Nothing yet.
+}
+
+void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
+ // Get the top-level buffer that this corresponds to.
+
+ // Rewrite tabs if we care.
+ //RewriteTabs();
+
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out metadata for each @protocol(<expr>).
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I)
+ RewriteObjCProtocolMetaData(*I, "", "", Preamble);
+
+ InsertText(SM->getLocForStartOfFile(MainFileID),
+ Preamble.c_str(), Preamble.size(), false);
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ fprintf(stderr, "No changes\n");
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+ // Rewrite Objective-C metadata.
+ std::string ResultStr;
+ SynthesizeMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
diff --git a/lib/Frontend/RewriteTest.cpp b/lib/Frontend/RewriteTest.cpp
new file mode 100644
index 0000000..f9eb58f
--- /dev/null
+++ b/lib/Frontend/RewriteTest.cpp
@@ -0,0 +1,39 @@
+//===--- RewriteTest.cpp - Rewriter playground ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a testbed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/TokenRewriter.h"
+#include "llvm/Support/raw_ostream.h"
+
+void clang::DoRewriteTest(Preprocessor &PP, llvm::raw_ostream* OS) {
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOptions();
+
+ TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts);
+
+ // Throw <i> </i> tags around comments.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I) {
+ if (I->isNot(tok::comment)) continue;
+
+ Rewriter.AddTokenBefore(I, "<i>");
+ Rewriter.AddTokenAfter(I, "</i>");
+ }
+
+
+ // Print out the output.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I)
+ *OS << PP.getSpelling(*I);
+}
diff --git a/lib/Frontend/StmtXML.cpp b/lib/Frontend/StmtXML.cpp
new file mode 100644
index 0000000..c861881
--- /dev/null
+++ b/lib/Frontend/StmtXML.cpp
@@ -0,0 +1,409 @@
+//===--- StmtXML.cpp - XML implementation for Stmt ASTs ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dumpXML methods, which dump out the
+// AST to an XML document.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/DocumentXML.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtXML Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VISIBILITY_HIDDEN StmtXML : public StmtVisitor<StmtXML> {
+ DocumentXML& Doc;
+
+ static const char *getOpcodeStr(UnaryOperator::Opcode Op);
+ static const char *getOpcodeStr(BinaryOperator::Opcode Op);
+
+ public:
+ StmtXML(DocumentXML& doc)
+ : Doc(doc) {
+ }
+
+ void DumpSubTree(Stmt *S) {
+ if (S)
+ {
+ Doc.addSubNode(S->getStmtClassName());
+ Doc.addLocationRange(S->getSourceRange());
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(S)) {
+ VisitDeclStmt(DS);
+ } else {
+ Visit(S);
+ for (Stmt::child_iterator i = S->child_begin(), e = S->child_end(); i != e; ++i)
+ {
+ DumpSubTree(*i);
+ }
+ }
+ Doc.toParent();
+ } else {
+ Doc.addSubNode("NULL").toParent();
+ }
+ }
+
+ void DumpTypeExpr(const QualType& T)
+ {
+ Doc.addSubNode("TypeExpr");
+ Doc.addTypeAttribute(T);
+ Doc.toParent();
+ }
+
+ void DumpExpr(const Expr *Node) {
+ Doc.addTypeAttribute(Node->getType());
+ }
+
+ // Stmts.
+ void VisitStmt(Stmt *Node);
+ void VisitDeclStmt(DeclStmt *Node);
+ void VisitLabelStmt(LabelStmt *Node);
+ void VisitGotoStmt(GotoStmt *Node);
+
+ // Exprs
+ void VisitExpr(Expr *Node);
+ void VisitDeclRefExpr(DeclRefExpr *Node);
+ void VisitPredefinedExpr(PredefinedExpr *Node);
+ void VisitCharacterLiteral(CharacterLiteral *Node);
+ void VisitIntegerLiteral(IntegerLiteral *Node);
+ void VisitFloatingLiteral(FloatingLiteral *Node);
+ void VisitStringLiteral(StringLiteral *Str);
+ void VisitUnaryOperator(UnaryOperator *Node);
+ void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node);
+ void VisitMemberExpr(MemberExpr *Node);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *Node);
+ void VisitBinaryOperator(BinaryOperator *Node);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
+ void VisitAddrLabelExpr(AddrLabelExpr *Node);
+ void VisitTypesCompatibleExpr(TypesCompatibleExpr *Node);
+
+ // C++
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node);
+ void VisitCXXThisExpr(CXXThisExpr *Node);
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node);
+
+ // ObjC
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *Node);
+ void VisitObjCMessageExpr(ObjCMessageExpr* Node);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
+ void VisitObjCKVCRefExpr(ObjCKVCRefExpr *Node);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+ void VisitObjCSuperExpr(ObjCSuperExpr *Node);
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtXML::VisitStmt(Stmt *Node)
+{
+ // nothing special to do
+}
+
+void StmtXML::VisitDeclStmt(DeclStmt *Node)
+{
+ for (DeclStmt::decl_iterator DI = Node->decl_begin(), DE = Node->decl_end();
+ DI != DE; ++DI)
+ {
+ Doc.PrintDecl(*DI);
+ }
+}
+
+void StmtXML::VisitLabelStmt(LabelStmt *Node)
+{
+ Doc.addAttribute("name", Node->getName());
+}
+
+void StmtXML::VisitGotoStmt(GotoStmt *Node)
+{
+ Doc.addAttribute("name", Node->getLabel()->getName());
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtXML::VisitExpr(Expr *Node) {
+ DumpExpr(Node);
+}
+
+void StmtXML::VisitDeclRefExpr(DeclRefExpr *Node) {
+ DumpExpr(Node);
+
+ const char* pKind;
+ switch (Node->getDecl()->getKind()) {
+ case Decl::Function: pKind = "FunctionDecl"; break;
+ case Decl::Var: pKind = "Var"; break;
+ case Decl::ParmVar: pKind = "ParmVar"; break;
+ case Decl::EnumConstant: pKind = "EnumConstant"; break;
+ case Decl::Typedef: pKind = "Typedef"; break;
+ case Decl::Record: pKind = "Record"; break;
+ case Decl::Enum: pKind = "Enum"; break;
+ case Decl::CXXRecord: pKind = "CXXRecord"; break;
+ case Decl::ObjCInterface: pKind = "ObjCInterface"; break;
+ case Decl::ObjCClass: pKind = "ObjCClass"; break;
+ default: pKind = "Decl"; break;
+ }
+
+ Doc.addAttribute("kind", pKind);
+ Doc.addAttribute("name", Node->getDecl()->getNameAsString());
+ Doc.addRefAttribute(Node->getDecl());
+}
+
+void StmtXML::VisitPredefinedExpr(PredefinedExpr *Node) {
+ DumpExpr(Node);
+ switch (Node->getIdentType()) {
+ default: assert(0 && "unknown case");
+ case PredefinedExpr::Func: Doc.addAttribute("predefined", " __func__"); break;
+ case PredefinedExpr::Function: Doc.addAttribute("predefined", " __FUNCTION__"); break;
+ case PredefinedExpr::PrettyFunction: Doc.addAttribute("predefined", " __PRETTY_FUNCTION__");break;
+ }
+}
+
+void StmtXML::VisitCharacterLiteral(CharacterLiteral *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("value", Node->getValue());
+}
+
+void StmtXML::VisitIntegerLiteral(IntegerLiteral *Node) {
+ DumpExpr(Node);
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ Doc.addAttribute("value", Node->getValue().toString(10, isSigned));
+}
+
+void StmtXML::VisitFloatingLiteral(FloatingLiteral *Node) {
+ DumpExpr(Node);
+ // FIXME: output float as written in source (no approximation or the like)
+ //Doc.addAttribute("value", Node->getValueAsApproximateDouble()));
+ Doc.addAttribute("value", "FIXME");
+}
+
+void StmtXML::VisitStringLiteral(StringLiteral *Str) {
+ DumpExpr(Str);
+ if (Str->isWide())
+ Doc.addAttribute("is_wide", "1");
+
+ Doc.addAttribute("value", Doc.escapeString(Str->getStrData(), Str->getByteLength()));
+}
+
+
+const char *StmtXML::getOpcodeStr(UnaryOperator::Opcode Op) {
+ switch (Op) {
+ default: assert(0 && "Unknown unary operator");
+ case UnaryOperator::PostInc: return "postinc";
+ case UnaryOperator::PostDec: return "postdec";
+ case UnaryOperator::PreInc: return "preinc";
+ case UnaryOperator::PreDec: return "predec";
+ case UnaryOperator::AddrOf: return "addrof";
+ case UnaryOperator::Deref: return "deref";
+ case UnaryOperator::Plus: return "plus";
+ case UnaryOperator::Minus: return "minus";
+ case UnaryOperator::Not: return "not";
+ case UnaryOperator::LNot: return "lnot";
+ case UnaryOperator::Real: return "__real";
+ case UnaryOperator::Imag: return "__imag";
+ case UnaryOperator::Extension: return "__extension__";
+ case UnaryOperator::OffsetOf: return "__builtin_offsetof";
+ }
+}
+
+
+const char *StmtXML::getOpcodeStr(BinaryOperator::Opcode Op) {
+ switch (Op) {
+ default: assert(0 && "Unknown binary operator");
+ case BinaryOperator::PtrMemD: return "ptrmemd";
+ case BinaryOperator::PtrMemI: return "ptrmemi";
+ case BinaryOperator::Mul: return "mul";
+ case BinaryOperator::Div: return "div";
+ case BinaryOperator::Rem: return "rem";
+ case BinaryOperator::Add: return "add";
+ case BinaryOperator::Sub: return "sub";
+ case BinaryOperator::Shl: return "shl";
+ case BinaryOperator::Shr: return "shr";
+ case BinaryOperator::LT: return "lt";
+ case BinaryOperator::GT: return "gt";
+ case BinaryOperator::LE: return "le";
+ case BinaryOperator::GE: return "ge";
+ case BinaryOperator::EQ: return "eq";
+ case BinaryOperator::NE: return "ne";
+ case BinaryOperator::And: return "and";
+ case BinaryOperator::Xor: return "xor";
+ case BinaryOperator::Or: return "or";
+ case BinaryOperator::LAnd: return "land";
+ case BinaryOperator::LOr: return "lor";
+ case BinaryOperator::Assign: return "assign";
+ case BinaryOperator::MulAssign: return "mulassign";
+ case BinaryOperator::DivAssign: return "divassign";
+ case BinaryOperator::RemAssign: return "remassign";
+ case BinaryOperator::AddAssign: return "addassign";
+ case BinaryOperator::SubAssign: return "subassign";
+ case BinaryOperator::ShlAssign: return "shlassign";
+ case BinaryOperator::ShrAssign: return "shrassign";
+ case BinaryOperator::AndAssign: return "andassign";
+ case BinaryOperator::XorAssign: return "xorassign";
+ case BinaryOperator::OrAssign: return "orassign";
+ case BinaryOperator::Comma: return "comma";
+ }
+}
+
+void StmtXML::VisitUnaryOperator(UnaryOperator *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("op_code", getOpcodeStr(Node->getOpcode()));
+}
+
+void StmtXML::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("is_sizeof", Node->isSizeOf() ? "sizeof" : "alignof");
+ Doc.addAttribute("is_type", Node->isArgumentType() ? "1" : "0");
+ if (Node->isArgumentType())
+ {
+ DumpTypeExpr(Node->getArgumentType());
+ }
+}
+
+void StmtXML::VisitMemberExpr(MemberExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("is_deref", Node->isArrow() ? "1" : "0");
+ Doc.addAttribute("name", Node->getMemberDecl()->getNameAsString());
+ Doc.addRefAttribute(Node->getMemberDecl());
+}
+
+void StmtXML::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("name", Node->getAccessor().getName());
+}
+
+void StmtXML::VisitBinaryOperator(BinaryOperator *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("op_code", getOpcodeStr(Node->getOpcode()));
+}
+
+void StmtXML::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ VisitBinaryOperator(Node);
+/* FIXME: is this needed in the AST?
+ DumpExpr(Node);
+ CurrentNode = CurrentNode->addSubNode("ComputeLHSTy");
+ DumpType(Node->getComputationLHSType());
+ CurrentNode = CurrentNode->Parent->addSubNode("ComputeResultTy");
+ DumpType(Node->getComputationResultType());
+ Doc.toParent();
+*/
+}
+
+// GNU extensions.
+
+void StmtXML::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("name", Node->getLabel()->getName());
+}
+
+void StmtXML::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
+ DumpExpr(Node);
+ DumpTypeExpr(Node->getArgType1());
+ DumpTypeExpr(Node->getArgType2());
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtXML::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("kind", Node->getCastName());
+ DumpTypeExpr(Node->getTypeAsWritten());
+}
+
+void StmtXML::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("value", Node->getValue() ? "true" : "false");
+}
+
+void StmtXML::VisitCXXThisExpr(CXXThisExpr *Node) {
+ DumpExpr(Node);
+}
+
+void StmtXML::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ DumpExpr(Node);
+ DumpTypeExpr(Node->getTypeAsWritten());
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtXML::VisitObjCMessageExpr(ObjCMessageExpr* Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("selector", Node->getSelector().getAsString());
+ IdentifierInfo* clsName = Node->getClassName();
+ if (clsName)
+ Doc.addAttribute("class", clsName->getName());
+}
+
+void StmtXML::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ DumpExpr(Node);
+ DumpTypeExpr(Node->getEncodedType());
+}
+
+void StmtXML::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("selector", Node->getSelector().getAsString());
+}
+
+void StmtXML::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("protocol", Node->getProtocol()->getNameAsString());
+}
+
+void StmtXML::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("property", Node->getProperty()->getNameAsString());
+}
+
+void StmtXML::VisitObjCKVCRefExpr(ObjCKVCRefExpr *Node) {
+ DumpExpr(Node);
+ ObjCMethodDecl *Getter = Node->getGetterMethod();
+ ObjCMethodDecl *Setter = Node->getSetterMethod();
+ Doc.addAttribute("Getter", Getter->getSelector().getAsString());
+ Doc.addAttribute("Setter", Setter ? Setter->getSelector().getAsString().c_str() : "(null)");
+}
+
+void StmtXML::VisitObjCSuperExpr(ObjCSuperExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("super", "1");
+}
+
+void StmtXML::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ DumpExpr(Node);
+ Doc.addAttribute("kind", Node->getDecl()->getDeclKindName());
+ Doc.addAttribute("decl", Node->getDecl()->getNameAsString());
+ if (Node->isFreeIvar())
+ Doc.addAttribute("isFreeIvar", "1");
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void DocumentXML::PrintStmt(const Stmt *S) {
+ StmtXML P(*this);
+ P.DumpSubTree(const_cast<Stmt*>(S));
+}
+
diff --git a/lib/Frontend/TextDiagnosticBuffer.cpp b/lib/Frontend/TextDiagnosticBuffer.cpp
new file mode 100644
index 0000000..a4518ee
--- /dev/null
+++ b/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -0,0 +1,39 @@
+//===--- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which buffers the diagnostic messages.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+/// HandleDiagnostic - Store the errors, warnings, and notes that are
+/// reported.
+///
+void TextDiagnosticBuffer::HandleDiagnostic(Diagnostic::Level Level,
+ const DiagnosticInfo &Info) {
+ llvm::SmallString<100> StrC;
+ Info.FormatDiagnostic(StrC);
+ std::string Str(StrC.begin(), StrC.end());
+ switch (Level) {
+ default: assert(0 && "Diagnostic not handled during diagnostic buffering!");
+ case Diagnostic::Note:
+ Notes.push_back(std::make_pair(Info.getLocation(), Str));
+ break;
+ case Diagnostic::Warning:
+ Warnings.push_back(std::make_pair(Info.getLocation(), Str));
+ break;
+ case Diagnostic::Error:
+ case Diagnostic::Fatal:
+ Errors.push_back(std::make_pair(Info.getLocation(), Str));
+ break;
+ }
+}
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
new file mode 100644
index 0000000..b1c0533
--- /dev/null
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -0,0 +1,710 @@
+//===--- TextDiagnosticPrinter.cpp - Diagnostic Printer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This diagnostic client prints out the diagnostic messages it receives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+/// \brief Number of spaces to indent when word-wrapping.
+const unsigned WordWrapIndentation = 6;
+
+void TextDiagnosticPrinter::
+PrintIncludeStack(SourceLocation Loc, const SourceManager &SM) {
+ if (Loc.isInvalid()) return;
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ // Print out the other include frames first.
+ PrintIncludeStack(PLoc.getIncludeLoc(), SM);
+
+ if (ShowLocation)
+ OS << "In file included from " << PLoc.getFilename()
+ << ':' << PLoc.getLine() << ":\n";
+ else
+ OS << "In included file:\n";
+}
+
+/// HighlightRange - Given a SourceRange and a line number, highlight (with ~'s)
+/// any characters in LineNo that intersect the SourceRange.
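+/// For instance (illustrative), a range covering the call in
+///   return foo(x, y);
+/// produces a run of '~' characters under "foo(x, y)" in the caret line.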
+void TextDiagnosticPrinter::HighlightRange(const SourceRange &R,
+ const SourceManager &SM,
+ unsigned LineNo, FileID FID,
+ std::string &CaretLine,
+ const std::string &SourceLine) {
+ assert(CaretLine.size() == SourceLine.size() &&
+ "Expect a correspondence between source and caret line!");
+ if (!R.isValid()) return;
+
+ SourceLocation Begin = SM.getInstantiationLoc(R.getBegin());
+ SourceLocation End = SM.getInstantiationLoc(R.getEnd());
+
+ // If the End location and the start location are the same and are a macro
+ // location, then the range was something that came from a macro expansion
+ // or _Pragma. If this is an object-like macro, the best we can do is to
+ // highlight the range. If this is a function-like macro, we'd also like to
+ // highlight the arguments.
+ if (Begin == End && R.getEnd().isMacroID())
+ End = SM.getInstantiationRange(R.getEnd()).second;
+
+ unsigned StartLineNo = SM.getInstantiationLineNumber(Begin);
+ if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
+ return; // No intersection.
+
+ unsigned EndLineNo = SM.getInstantiationLineNumber(End);
+ if (EndLineNo < LineNo || SM.getFileID(End) != FID)
+ return; // No intersection.
+
+ // Compute the column number of the start.
+ unsigned StartColNo = 0;
+ if (StartLineNo == LineNo) {
+ StartColNo = SM.getInstantiationColumnNumber(Begin);
+ if (StartColNo) --StartColNo; // Zero base the col #.
+ }
+
+ // Pick the first non-whitespace column.
+ while (StartColNo < SourceLine.size() &&
+ (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
+ ++StartColNo;
+
+ // Compute the column number of the end.
+ unsigned EndColNo = CaretLine.size();
+ if (EndLineNo == LineNo) {
+ EndColNo = SM.getInstantiationColumnNumber(End);
+ if (EndColNo) {
+ --EndColNo; // Zero base the col #.
+
+ // Add in the length of the token, so that we cover multi-char tokens.
+ EndColNo += Lexer::MeasureTokenLength(End, SM, *LangOpts);
+ } else {
+ EndColNo = CaretLine.size();
+ }
+ }
+
+ // Pick the last non-whitespace column.
+ if (EndColNo <= SourceLine.size())
+ while (EndColNo-1 &&
+ (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
+ --EndColNo;
+ else
+ EndColNo = SourceLine.size();
+
+ // Fill the range with ~'s.
+ assert(StartColNo <= EndColNo && "Invalid range!");
+ for (unsigned i = StartColNo; i < EndColNo; ++i)
+ CaretLine[i] = '~';
+}
+
+/// \brief When the source code line we want to print is too long for
+/// the terminal, select the "interesting" region.
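+///
+/// For example (illustrative), a long source line whose caret falls near the
+/// end may be shown as "  ...y = compute(a, b, c);" with the uninteresting
+/// prefix (and, when needed, the suffix) replaced by "..." as done below.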
+static void SelectInterestingSourceRegion(std::string &SourceLine,
+ std::string &CaretLine,
+ std::string &FixItInsertionLine,
+ unsigned EndOfCaretToken,
+ unsigned Columns) {
+ if (CaretLine.size() > SourceLine.size())
+ SourceLine.resize(CaretLine.size(), ' ');
+
+ // Find the slice that we need to display the full caret line
+ // correctly.
+ unsigned CaretStart = 0, CaretEnd = CaretLine.size();
+ for (; CaretStart != CaretEnd; ++CaretStart)
+ if (!isspace(CaretLine[CaretStart]))
+ break;
+
+ for (; CaretEnd != CaretStart; --CaretEnd)
+ if (!isspace(CaretLine[CaretEnd - 1]))
+ break;
+
+ // Make sure we don't chop the string shorter than the caret token
+ // itself.
+ if (CaretEnd < EndOfCaretToken)
+ CaretEnd = EndOfCaretToken;
+
+ // If we have a fix-it line, make sure the slice includes all of the
+ // fix-it information.
+ if (!FixItInsertionLine.empty()) {
+ unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
+ for (; FixItStart != FixItEnd; ++FixItStart)
+ if (!isspace(FixItInsertionLine[FixItStart]))
+ break;
+
+ for (; FixItEnd != FixItStart; --FixItEnd)
+ if (!isspace(FixItInsertionLine[FixItEnd - 1]))
+ break;
+
+ if (FixItStart < CaretStart)
+ CaretStart = FixItStart;
+ if (FixItEnd > CaretEnd)
+ CaretEnd = FixItEnd;
+ }
+
+ // CaretLine[CaretStart, CaretEnd) contains all of the interesting
+ // parts of the caret line. While this slice is smaller than the
+ // number of columns we have, try to grow the slice to encompass
+ // more context.
+
+ // If the end of the interesting region comes before we run out of
+ // space in the terminal, start at the beginning of the line.
+ if (Columns > 3 && CaretEnd < Columns - 3)
+ CaretStart = 0;
+
+ unsigned TargetColumns = Columns;
+ if (TargetColumns > 8)
+ TargetColumns -= 8; // Give us extra room for the ellipses.
+ unsigned SourceLength = SourceLine.size();
+ while ((CaretEnd - CaretStart) < TargetColumns) {
+ bool ExpandedRegion = false;
+ // Move the start of the interesting region left until we've
+ // pulled in something else interesting.
+ if (CaretStart == 1)
+ CaretStart = 0;
+ else if (CaretStart > 1) {
+ unsigned NewStart = CaretStart - 1;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewStart && isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Skip over this bit of "interesting" text.
+ while (NewStart && !isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Move up to the non-whitespace character we just saw.
+ if (NewStart)
+ ++NewStart;
+
+ // If we're still within our limit, update the starting
+ // position within the source/caret line.
+ if (CaretEnd - NewStart <= TargetColumns) {
+ CaretStart = NewStart;
+ ExpandedRegion = true;
+ }
+ }
+
+ // Move the end of the interesting region right until we've
+ // pulled in something else interesting.
+ if (CaretEnd != SourceLength) {
+ unsigned NewEnd = CaretEnd;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewEnd != SourceLength && isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ // Skip over this bit of "interesting" text.
+ while (NewEnd != SourceLength && !isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ if (NewEnd - CaretStart <= TargetColumns) {
+ CaretEnd = NewEnd;
+ ExpandedRegion = true;
+ }
+ }
+
+ if (!ExpandedRegion)
+ break;
+ }
+
+ // [CaretStart, CaretEnd) is the slice we want. Update the various
+ // output lines to show only this slice, with two-space padding
+ // before the lines so that it looks nicer.
+ if (CaretEnd < SourceLine.size())
+ SourceLine.replace(CaretEnd, std::string::npos, "...");
+ if (CaretEnd < CaretLine.size())
+ CaretLine.erase(CaretEnd, std::string::npos);
+ if (FixItInsertionLine.size() > CaretEnd)
+ FixItInsertionLine.erase(CaretEnd, std::string::npos);
+
+ if (CaretStart > 2) {
+ SourceLine.replace(0, CaretStart, " ...");
+ CaretLine.replace(0, CaretStart, " ");
+ if (FixItInsertionLine.size() >= CaretStart)
+ FixItInsertionLine.replace(0, CaretStart, " ");
+ }
+}
+
+void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
+ SourceRange *Ranges,
+ unsigned NumRanges,
+ SourceManager &SM,
+ const CodeModificationHint *Hints,
+ unsigned NumHints,
+ unsigned Columns) {
+ assert(!Loc.isInvalid() && "must have a valid source location here");
+
+ // If this is a macro ID, first emit information about where this was
+ // instantiated (recursively), then emit information about where the token
+ // was spelled from.
+ if (!Loc.isFileID()) {
+ SourceLocation OneLevelUp = SM.getImmediateInstantiationRange(Loc).first;
+ // FIXME: Map ranges?
+ EmitCaretDiagnostic(OneLevelUp, Ranges, NumRanges, SM, 0, 0, Columns);
+
+ Loc = SM.getImmediateSpellingLoc(Loc);
+
+ // Map the ranges.
+ for (unsigned i = 0; i != NumRanges; ++i) {
+ SourceLocation S = Ranges[i].getBegin(), E = Ranges[i].getEnd();
+ if (S.isMacroID()) S = SM.getImmediateSpellingLoc(S);
+ if (E.isMacroID()) E = SM.getImmediateSpellingLoc(E);
+ Ranges[i] = SourceRange(S, E);
+ }
+
+ if (ShowLocation) {
+ std::pair<FileID, unsigned> IInfo = SM.getDecomposedInstantiationLoc(Loc);
+
+ // Emit the file/line/column that this expansion came from.
+ OS << SM.getBuffer(IInfo.first)->getBufferIdentifier() << ':'
+ << SM.getLineNumber(IInfo.first, IInfo.second) << ':';
+ if (ShowColumn)
+ OS << SM.getColumnNumber(IInfo.first, IInfo.second) << ':';
+ OS << ' ';
+ }
+ OS << "note: instantiated from:\n";
+
+ EmitCaretDiagnostic(Loc, Ranges, NumRanges, SM, Hints, NumHints, Columns);
+ return;
+ }
+
+ // Decompose the location into a FID/Offset pair.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ FileID FID = LocInfo.first;
+ unsigned FileOffset = LocInfo.second;
+
+ // Get information about the buffer it points into.
+ std::pair<const char*, const char*> BufferInfo = SM.getBufferData(FID);
+ const char *BufStart = BufferInfo.first;
+
+ unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
+ unsigned CaretEndColNo
+ = ColNo + Lexer::MeasureTokenLength(Loc, SM, *LangOpts);
+
+ // Rewind from the current position to the start of the line.
+ const char *TokPtr = BufStart+FileOffset;
+ const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.
+
+
+ // Compute the line end. Scan forward from the error position to the end of
+ // the line.
+ const char *LineEnd = TokPtr;
+ while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
+ ++LineEnd;
+
+ // Copy the line of code into an std::string for ease of manipulation.
+ std::string SourceLine(LineStart, LineEnd);
+
+ // Create a line for the caret that is filled with spaces that is the same
+ // length as the line of source code.
+ std::string CaretLine(LineEnd-LineStart, ' ');
+
+ // Highlight all of the characters covered by Ranges with ~ characters.
+ if (NumRanges) {
+ unsigned LineNo = SM.getLineNumber(FID, FileOffset);
+
+ for (unsigned i = 0, e = NumRanges; i != e; ++i)
+ HighlightRange(Ranges[i], SM, LineNo, FID, CaretLine, SourceLine);
+ }
+
+ // Next, insert the caret itself.
+ if (ColNo-1 < CaretLine.size())
+ CaretLine[ColNo-1] = '^';
+ else
+ CaretLine.push_back('^');
+
+ // Scan the source line, looking for tabs. If we find any, manually expand
+ // them to 8 characters and update the CaretLine to match.
+ for (unsigned i = 0; i != SourceLine.size(); ++i) {
+ if (SourceLine[i] != '\t') continue;
+
+ // Replace this tab with at least one space.
+ SourceLine[i] = ' ';
+
+ // Compute the number of spaces we need to insert.
+ unsigned NumSpaces = ((i+8)&~7) - (i+1);
+ assert(NumSpaces < 8 && "Invalid computation of space amt");
+
+ // Insert spaces into the SourceLine.
+ SourceLine.insert(i+1, NumSpaces, ' ');
+
+ // Insert spaces or ~'s into CaretLine.
+ CaretLine.insert(i+1, NumSpaces, CaretLine[i] == '~' ? '~' : ' ');
+ }
+
+ // If we are in -fdiagnostics-print-source-range-info mode, we are trying to
+ // produce easily machine-parsable output. Add a space before the source line
+ // and the caret so it is trivial to tell the main diagnostic line apart from
+ // the echoed source and caret lines shown to the user.
+ if (PrintRangeInfo) {
+ SourceLine = ' ' + SourceLine;
+ CaretLine = ' ' + CaretLine;
+ }
+
+ std::string FixItInsertionLine;
+ if (NumHints && PrintFixItInfo) {
+ for (const CodeModificationHint *Hint = Hints, *LastHint = Hints + NumHints;
+ Hint != LastHint; ++Hint) {
+ if (Hint->InsertionLoc.isValid()) {
+ // We have an insertion hint. Determine whether the inserted
+ // code is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo
+ = SM.getDecomposedInstantiationLoc(Hint->InsertionLoc);
+ if (SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) ==
+ SM.getLineNumber(FID, FileOffset)) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ unsigned HintColNo
+ = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second);
+ unsigned LastColumnModified
+ = HintColNo - 1 + Hint->CodeToInsert.size();
+ if (LastColumnModified > FixItInsertionLine.size())
+ FixItInsertionLine.resize(LastColumnModified, ' ');
+ std::copy(Hint->CodeToInsert.begin(), Hint->CodeToInsert.end(),
+ FixItInsertionLine.begin() + HintColNo - 1);
+ } else {
+ FixItInsertionLine.clear();
+ break;
+ }
+ }
+ }
+ }
+
+ // If the source line is too long for our terminal, select only the
+ // "interesting" source region within that line.
+ if (Columns && SourceLine.size() > Columns)
+ SelectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
+ CaretEndColNo, Columns);
+
+ // Finally, remove any blank spaces from the end of CaretLine.
+ while (CaretLine[CaretLine.size()-1] == ' ')
+ CaretLine.erase(CaretLine.end()-1);
+
+ // Emit what we have computed.
+ OS << SourceLine << '\n';
+ OS << CaretLine << '\n';
+
+ if (!FixItInsertionLine.empty()) {
+ if (PrintRangeInfo)
+ OS << ' ';
+ OS << FixItInsertionLine << '\n';
+ }
+}
+
+/// \brief Skip over whitespace in the string, starting at the given
+/// index.
+///
+/// \returns The index of the first non-whitespace character that is
+/// greater than or equal to Idx or, if no such character exists,
+/// returns the end of the string.
+static unsigned skipWhitespace(unsigned Idx,
+ const llvm::SmallVectorImpl<char> &Str,
+ unsigned Length) {
+ while (Idx < Length && isspace(Str[Idx]))
+ ++Idx;
+ return Idx;
+}
+
+/// \brief If the given character is the start of some kind of
+/// balanced punctuation (e.g., quotes or parentheses), return the
+/// character that will terminate the punctuation.
+///
+/// \returns The ending punctuation character, if any, or the NULL
+/// character if the input character does not start any punctuation.
+static inline char findMatchingPunctuation(char c) {
+ switch (c) {
+ case '\'': return '\'';
+ case '`': return '\'';
+ case '"': return '"';
+ case '(': return ')';
+ case '[': return ']';
+ case '{': return '}';
+ default: break;
+ }
+
+ return 0;
+}
+
+/// \brief Find the end of the word starting at the given offset
+/// within a string.
+///
+/// \returns the index pointing one character past the end of the
+/// word.
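+///
+/// For example (illustrative), a word that begins with an opening quote or
+/// parenthesis, such as '(a, b)', is extended to its matching close so the
+/// whole quoted phrase wraps as a single unit when it is short enough.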
+unsigned findEndOfWord(unsigned Start,
+ const llvm::SmallVectorImpl<char> &Str,
+ unsigned Length, unsigned Column,
+ unsigned Columns) {
+ unsigned End = Start + 1;
+
+ // Determine if the start of the string is actually opening
+ // punctuation, e.g., a quote or parentheses.
+ char EndPunct = findMatchingPunctuation(Str[Start]);
+ if (!EndPunct) {
+ // This is a normal word. Just find the first space character.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+ return End;
+ }
+
+ // We have the start of a balanced punctuation sequence (quotes,
+ // parentheses, etc.). Determine where the full sequence ends.
+ llvm::SmallVector<char, 16> PunctuationEndStack;
+ PunctuationEndStack.push_back(EndPunct);
+ while (End < Length && !PunctuationEndStack.empty()) {
+ if (Str[End] == PunctuationEndStack.back())
+ PunctuationEndStack.pop_back();
+ else if (char SubEndPunct = findMatchingPunctuation(Str[End]))
+ PunctuationEndStack.push_back(SubEndPunct);
+
+ ++End;
+ }
+
+ // Find the first space character after the punctuation ended.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+
+ unsigned PunctWordLength = End - Start;
+ if (// If the word fits on this line
+ Column + PunctWordLength <= Columns ||
+ // ... or the word is "short enough" to take up the next line
+ // without too much ugly white space
+ PunctWordLength < Columns/3)
+ return End; // Take the whole thing as a single "word".
+
+ // The whole quoted/parenthesized string is too long to print as a
+ // single "word". Instead, find the "word" that starts just after
+ // the punctuation and use that end-point instead. This will recurse
+ // until it finds something small enough to consider a word.
+ return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
+}
+
+/// \brief Print the given string to a stream, word-wrapping it to
+/// some number of columns in the process.
+///
+/// \param OS the stream to which the word-wrapped string will be
+/// emitted.
+///
+/// \param Str the string to word-wrap and output.
+///
+/// \param Columns the number of columns to word-wrap to.
+///
+/// \param Column the column number at which the first character of \p
+/// Str will be printed. This will be non-zero when part of the first
+/// line has already been printed.
+///
+/// \param Indentation the number of spaces to indent any lines beyond
+/// the first line.
+///
+/// \returns true if word-wrapping was required, or false if the
+/// string fit on the first line.
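+///
+/// Illustrative call (hypothetical values): PrintWordWrapped(OS, Msg, 80, 20)
+/// wraps Msg to 80 columns, assuming 20 columns of location prefix have
+/// already been printed, indenting continuation lines by WordWrapIndentation.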
+static bool PrintWordWrapped(llvm::raw_ostream &OS,
+ const llvm::SmallVectorImpl<char> &Str,
+ unsigned Columns,
+ unsigned Column = 0,
+ unsigned Indentation = WordWrapIndentation) {
+ unsigned Length = Str.size();
+
+ // If there is a newline in this message somewhere, find that
+ // newline and split the message into the part before the newline
+ // (which will be word-wrapped) and the part from the newline onward
+ // (which will be emitted unchanged).
+ for (unsigned I = 0; I != Length; ++I)
+ if (Str[I] == '\n') {
+ Length = I;
+ break;
+ }
+
+ // The string used to indent each line.
+ llvm::SmallString<16> IndentStr;
+ IndentStr.assign(Indentation, ' ');
+ bool Wrapped = false;
+ for (unsigned WordStart = 0, WordEnd; WordStart < Length;
+ WordStart = WordEnd) {
+ // Find the beginning of the next word.
+ WordStart = skipWhitespace(WordStart, Str, Length);
+ if (WordStart == Length)
+ break;
+
+ // Find the end of this word.
+ WordEnd = findEndOfWord(WordStart, Str, Length, Column, Columns);
+
+ // Does this word fit on the current line?
+ unsigned WordLength = WordEnd - WordStart;
+ if (Column + WordLength < Columns) {
+ // This word fits on the current line; print it there.
+ if (WordStart) {
+ OS << ' ';
+ Column += 1;
+ }
+ OS.write(&Str[WordStart], WordLength);
+ Column += WordLength;
+ continue;
+ }
+
+ // This word does not fit on the current line, so wrap to the next
+ // line.
+ OS << '\n';
+ OS.write(&IndentStr[0], Indentation);
+ OS.write(&Str[WordStart], WordLength);
+ Column = Indentation + WordLength;
+ Wrapped = true;
+ }
+
+ if (Length == Str.size())
+ return Wrapped; // We're done.
+
+ // There is a newline in the message, followed by something that
+ // will not be word-wrapped. Print that.
+ OS.write(&Str[Length], Str.size() - Length);
+ return true;
+}
+
+void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
+ const DiagnosticInfo &Info) {
+ // Keeps track of the starting position of the location
+ // information (e.g., "foo.c:10:4:") that precedes the error
+ // message. We use this information to determine how long the
+ // file+line+column number prefix is.
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ // If the location is specified, print out a file/line/col and include trace
+ // if enabled.
+ if (Info.getLocation().isValid()) {
+ const SourceManager &SM = Info.getLocation().getManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Info.getLocation());
+ unsigned LineNo = PLoc.getLine();
+
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ if (LastWarningLoc != PLoc.getIncludeLoc()) {
+ LastWarningLoc = PLoc.getIncludeLoc();
+ PrintIncludeStack(LastWarningLoc, SM);
+ StartOfLocationInfo = OS.tell();
+ }
+
+ // Compute the column number.
+ if (ShowLocation) {
+ OS << PLoc.getFilename() << ':' << LineNo << ':';
+ if (ShowColumn)
+ if (unsigned ColNo = PLoc.getColumn())
+ OS << ColNo << ':';
+
+ if (PrintRangeInfo && Info.getNumRanges()) {
+ FileID CaretFileID =
+ SM.getFileID(SM.getInstantiationLoc(Info.getLocation()));
+ bool PrintedRange = false;
+
+ for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) {
+ // Ignore invalid ranges.
+ if (!Info.getRange(i).isValid()) continue;
+
+ SourceLocation B = Info.getRange(i).getBegin();
+ SourceLocation E = Info.getRange(i).getEnd();
+ std::pair<FileID, unsigned> BInfo=SM.getDecomposedInstantiationLoc(B);
+
+ E = SM.getInstantiationLoc(E);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char tokens.
+ unsigned TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts);
+
+ OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize) << '}';
+ PrintedRange = true;
+ }
+
+ if (PrintedRange)
+ OS << ':';
+ }
+ OS << ' ';
+ }
+ }
+
+ switch (Level) {
+ case Diagnostic::Ignored: assert(0 && "Invalid diagnostic type");
+ case Diagnostic::Note: OS << "note: "; break;
+ case Diagnostic::Warning: OS << "warning: "; break;
+ case Diagnostic::Error: OS << "error: "; break;
+ case Diagnostic::Fatal: OS << "fatal error: "; break;
+ }
+
+ llvm::SmallString<100> OutStr;
+ Info.FormatDiagnostic(OutStr);
+
+ if (PrintDiagnosticOption)
+ if (const char *Opt = Diagnostic::getWarningOptionForDiag(Info.getID())) {
+ OutStr += " [-W";
+ OutStr += Opt;
+ OutStr += ']';
+ }
+
+ if (MessageLength) {
+ // We will be word-wrapping the error message, so compute the
+ // column number where we currently are (after printing the
+ // location information).
+ unsigned Column = OS.tell() - StartOfLocationInfo;
+ PrintWordWrapped(OS, OutStr, MessageLength, Column);
+ } else {
+ OS.write(OutStr.begin(), OutStr.size());
+ }
+ OS << '\n';
+
+ // If caret diagnostics are enabled and we have location, we want to
+ // emit the caret. However, we only do this if the location moved
+ // from the last diagnostic, if the last diagnostic was a note that
+ // was part of a different warning or error diagnostic, or if the
+ // diagnostic has ranges. We don't want to emit the same caret
+ // multiple times if one loc has multiple diagnostics.
+ if (CaretDiagnostics && Info.getLocation().isValid() &&
+ ((LastLoc != Info.getLocation()) || Info.getNumRanges() ||
+ (LastCaretDiagnosticWasNote && Level != Diagnostic::Note) ||
+ Info.getNumCodeModificationHints())) {
+ // Cache the LastLoc, it allows us to omit duplicate source/caret spewage.
+ LastLoc = Info.getLocation();
+ LastCaretDiagnosticWasNote = (Level == Diagnostic::Note);
+
+ // Get the ranges into a local array we can hack on.
+ SourceRange Ranges[20];
+ unsigned NumRanges = Info.getNumRanges();
+ assert(NumRanges < 20 && "Out of space");
+ for (unsigned i = 0; i != NumRanges; ++i)
+ Ranges[i] = Info.getRange(i);
+
+ unsigned NumHints = Info.getNumCodeModificationHints();
+ for (unsigned idx = 0; idx < NumHints; ++idx) {
+ const CodeModificationHint &Hint = Info.getCodeModificationHint(idx);
+ if (Hint.RemoveRange.isValid()) {
+ assert(NumRanges < 20 && "Out of space");
+ Ranges[NumRanges++] = Hint.RemoveRange;
+ }
+ }
+
+ EmitCaretDiagnostic(LastLoc, Ranges, NumRanges, LastLoc.getManager(),
+ Info.getCodeModificationHints(),
+ Info.getNumCodeModificationHints(),
+ MessageLength);
+ }
+
+ OS.flush();
+}
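
What the code above prints, schematically, is a prefix of the form file:line:col:{bl:bc-el:ec}: followed by the severity and the formatted message (plus the warning option when enabled). The following standalone sketch uses plain printf and hypothetical values, not the clang raw_ostream path; it only illustrates that shape.

#include <cstdio>

int main() {
  // Hypothetical location data for one diagnostic with a single source range.
  const char *File = "t.c";
  unsigned Line = 12, Col = 7;                 // caret position
  unsigned BL = 12, BC = 3, EL = 12, EC = 15;  // range; end column includes token length
  std::printf("%s:%u:%u:{%u:%u-%u:%u}: warning: <formatted message> [-Wfoo]\n",
              File, Line, Col, BL, BC, EL, EC);
  return 0;
}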
diff --git a/lib/Frontend/Warnings.cpp b/lib/Frontend/Warnings.cpp
new file mode 100644
index 0000000..81f75bd
--- /dev/null
+++ b/lib/Frontend/Warnings.cpp
@@ -0,0 +1,106 @@
+//===--- Warnings.cpp - C-Language Front-end ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Command line warning options handler.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is responsible for handling all warning options. This includes
+// a number of -Wfoo options and their variants, which are driven by TableGen-
+// generated data, and the special cases -pedantic, -pedantic-errors, -w and
+// -Werror.
+//
+// Each warning option controls any number of actual warnings.
+// Given a warning option 'foo', the following are valid:
+// -Wfoo, -Wno-foo, -Werror=foo
+//
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include <cstdio>
+#include <cstring>
+#include <utility>
+#include <algorithm>
+using namespace clang;
+
+bool clang::ProcessWarningOptions(Diagnostic &Diags,
+ std::vector<std::string> &Warnings,
+ bool Pedantic, bool PedanticErrors,
+ bool NoWarnings) {
+ Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
+ Diags.setIgnoreAllWarnings(NoWarnings);
+
+ // If -pedantic or -pedantic-errors was specified, then we want to map all
+ // extension diagnostics onto WARNING or ERROR unless the user has futz'd
+ // around with them explicitly.
+ if (PedanticErrors)
+ Diags.setExtensionHandlingBehavior(Diagnostic::Ext_Error);
+ else if (Pedantic)
+ Diags.setExtensionHandlingBehavior(Diagnostic::Ext_Warn);
+ else
+ Diags.setExtensionHandlingBehavior(Diagnostic::Ext_Ignore);
+
+ // FIXME: -Wfatal-errors / -Wfatal-errors=foo
+
+ for (unsigned i = 0, e = Warnings.size(); i != e; ++i) {
+ const std::string &Opt = Warnings[i];
+ const char *OptStart = &Opt[0];
+ const char *OptEnd = OptStart+Opt.size();
+ assert(*OptEnd == 0 && "Expect null termination for lower-bound search");
+
+ // Check to see if this warning starts with "no-", if so, this is a negative
+ // form of the option.
+ bool isPositive = true;
+ if (OptEnd-OptStart > 3 && memcmp(OptStart, "no-", 3) == 0) {
+ isPositive = false;
+ OptStart += 3;
+ }
+
+ // Figure out how this option affects the warning. If -Wfoo, map the
+ // diagnostic to a warning, if -Wno-foo, map it to ignore.
+ diag::Mapping Mapping = isPositive ? diag::MAP_WARNING : diag::MAP_IGNORE;
+
+ // -Wsystem-headers is a special case, not driven by the option table. It
+ // cannot be controlled with -Werror.
+ if (OptEnd-OptStart == 14 && memcmp(OptStart, "system-headers", 14) == 0) {
+ Diags.setSuppressSystemWarnings(!isPositive);
+ continue;
+ }
+
+ // -Werror/-Wno-error is a special case, not controlled by the option table.
+ // It also has the "specifier" form of -Werror=foo and -Werror-foo.
+ if (OptEnd-OptStart >= 5 && memcmp(OptStart, "error", 5) == 0) {
+ const char *Specifier = 0;
+ if (OptEnd-OptStart != 5) { // Specifier must be present.
+ if ((OptStart[5] != '=' && OptStart[5] != '-') ||
+ OptEnd-OptStart == 6) {
+ fprintf(stderr, "warning: unknown -Werror warning specifier: -W%s\n",
+ Opt.c_str());
+ continue;
+ }
+ Specifier = OptStart+6;
+ }
+
+ if (Specifier == 0) {
+ Diags.setWarningsAsErrors(true);
+ continue;
+ }
+
+ // -Werror=foo maps foo to Error, -Wno-error=foo maps it to Warning.
+ Mapping = isPositive ? diag::MAP_ERROR : diag::MAP_WARNING_NO_WERROR;
+ OptStart = Specifier;
+ }
+
+ if (Diags.setDiagnosticGroupMapping(OptStart, Mapping))
+ fprintf(stderr, "warning: unknown warning option: -W%s\n", Opt.c_str());
+ }
+
+ return false;
+}
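
A minimal sketch of how a driver might call this entry point, using the ProcessWarningOptions signature declared above. The warning-group names are hypothetical examples of entries in the TableGen-generated data, and the strings arrive already stripped of their leading "-W", as the parsing above expects.

#include "clang/Frontend/Utils.h"
#include "clang/Basic/Diagnostic.h"
#include <string>
#include <vector>

// Hypothetical driver snippet for: -Wunused -Wno-trigraphs -Werror=return-type
void ConfigureDiags(clang::Diagnostic &Diags) {
  std::vector<std::string> Warnings;
  Warnings.push_back("unused");            // -Wfoo          -> MAP_WARNING
  Warnings.push_back("no-trigraphs");      // -Wno-foo       -> MAP_IGNORE
  Warnings.push_back("error=return-type"); // -Werror=foo    -> MAP_ERROR
  clang::ProcessWarningOptions(Diags, Warnings,
                               /*Pedantic=*/false, /*PedanticErrors=*/false,
                               /*NoWarnings=*/false);
}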
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
new file mode 100644
index 0000000..3c42167
--- /dev/null
+++ b/lib/Headers/CMakeLists.txt
@@ -0,0 +1,25 @@
+set(files
+ iso646.h
+ mmintrin.h
+ stdarg.h
+ stdbool.h
+ stddef.h
+ )
+
+set(output_dir ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/../Headers)
+
+foreach( f ${files} )
+ set( src ${CMAKE_CURRENT_SOURCE_DIR}/${f} )
+ set( dst ${output_dir}/${f} )
+ add_custom_command(OUTPUT ${dst}
+ DEPENDS ${src}
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst}
+ COMMENT "Copying clang's ${f}...")
+endforeach( f )
+
+add_custom_target(clang_headers ALL
+ DEPENDS ${files})
+
+install(FILES ${files}
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ DESTINATION Headers)
diff --git a/lib/Headers/Makefile b/lib/Headers/Makefile
new file mode 100644
index 0000000..77eb96d
--- /dev/null
+++ b/lib/Headers/Makefile
@@ -0,0 +1,40 @@
+##===- clang/lib/Headers/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+include $(LEVEL)/Makefile.common
+
+# FIXME: Get version from a common place.
+HeaderDir := $(PROJ_OBJ_ROOT)/$(BuildMode)/lib/clang/1.0/include
+
+HEADERS := $(notdir $(wildcard $(PROJ_SRC_DIR)/*.h))
+
+OBJHEADERS := $(addprefix $(HeaderDir)/, $(HEADERS))
+
+
+$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir
+ $(Verb) cp $< $@
+ $(Echo) Copying $(notdir $<) to build dir
+
+# Hook into the standard Makefile rules.
+all-local:: $(OBJHEADERS)
+
+PROJ_headers := $(DESTDIR)$(PROJ_prefix)/lib/clang/1.0/include
+
+INSTHEADERS := $(addprefix $(PROJ_headers)/, $(HEADERS))
+
+$(PROJ_headers):
+ $(Verb) $(MKDIR) $@
+
+$(INSTHEADERS): $(PROJ_headers)/%.h: $(HeaderDir)/%.h | $(PROJ_headers)
+ $(Verb) $(DataInstall) $< $(PROJ_headers)
+ $(Echo) Installing compiler include file: $(notdir $<)
+
+install-local:: $(INSTHEADERS)
+
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
new file mode 100644
index 0000000..c96000a
--- /dev/null
+++ b/lib/Headers/emmintrin.h
@@ -0,0 +1,1329 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#ifndef __SSE2__
+#error "SSE2 instruction set not enabled"
+#else
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16)));
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_add_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_addsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_add_pd(__m128d a, __m128d b)
+{
+ return a + b;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sub_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_subsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pd(__m128d a, __m128d b)
+{
+ return a - b;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_mul_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_mulsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_mul_pd(__m128d a, __m128d b)
+{
+ return a * b;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_div_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_divsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_div_pd(__m128d a, __m128d b)
+{
+ return a / b;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_sd(__m128d a, __m128d b)
+{
+ __m128d c = __builtin_ia32_sqrtsd(b);
+ return (__m128d) { c[0], a[1] };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_pd(__m128d a)
+{
+ return __builtin_ia32_sqrtpd(a);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_min_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_minsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_min_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_minpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_max_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_maxsd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_max_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_maxpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_and_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_andpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_andnpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_or_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_orpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_xor_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_xorpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 0);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 7);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 3);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 4);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 5);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 6);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 5);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 6);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 0);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 7);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 3);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 4);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 5);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 6);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 5);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 6);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comieq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdeq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comilt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdlt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comile_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdle(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comigt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdgt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comineq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdneq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomieq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdeq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomilt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdlt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomile_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdle(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomigt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdgt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomineq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdneq(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_ps(__m128d a)
+{
+ return __builtin_ia32_cvtpd2ps(a);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pd(__m128 a)
+{
+ return __builtin_ia32_cvtps2pd(a);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi32_pd(__m128i a)
+{
+ return __builtin_ia32_cvtdq2pd((__v4si)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_epi32(__m128d a)
+{
+ return __builtin_ia32_cvtpd2dq(a);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_si32(__m128d a)
+{
+ return __builtin_ia32_cvtsd2si(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_ss(__m128 a, __m128d b)
+{
+ return __builtin_ia32_cvtsd2ss(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_sd(__m128d a, int b)
+{
+ return __builtin_ia32_cvtsi2sd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_sd(__m128d a, __m128 b)
+{
+ return __builtin_ia32_cvtss2sd(a, b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvttpd_epi32(__m128d a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq(a);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvttsd_si32(__m128d a)
+{
+ return __builtin_ia32_cvttsd2si(a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_pi32(__m128d a)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi(a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvttpd_pi32(__m128d a)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi(a);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32_pd(__m64 a)
+{
+ return __builtin_ia32_cvtpi2pd((__v2si)a);
+}
+
+static inline double __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_f64(__m128d a)
+{
+ return a[0];
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load_pd(double const *dp)
+{
+ return *(__m128d*)dp;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load1_pd(double const *dp)
+{
+ return (__m128d){ dp[0], dp[0] };
+}
+
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadr_pd(double const *dp)
+{
+ return (__m128d){ dp[1], dp[0] };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_pd(double const *dp)
+{
+ return __builtin_ia32_loadupd(dp);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load_sd(double const *dp)
+{
+ return (__m128d){ *dp, 0.0 };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadh_pd(__m128d a, double const *dp)
+{
+ return __builtin_shufflevector(a, *(__m128d *)dp, 0, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_pd(__m128d a, double const *dp)
+{
+ return __builtin_shufflevector(a, *(__m128d *)dp, 2, 1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set_sd(double w)
+{
+ return (__m128d){ w, 0 };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pd(double w)
+{
+ return (__m128d){ w, w };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set_pd(double w, double x)
+{
+ return (__m128d){ w, x };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pd(double w, double x)
+{
+ return (__m128d){ x, w };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_pd(void)
+{
+ return (__m128d){ 0, 0 };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_move_sd(__m128d a, __m128d b)
+{
+ return (__m128d){ b[0], a[1] };
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store_sd(double *dp, __m128d a)
+{
+ dp[0] = a[0];
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store1_pd(double *dp, __m128d a)
+{
+ dp[0] = a[0];
+ dp[1] = a[0];
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store_pd(double *dp, __m128d a)
+{
+ *(__m128d *)dp = a;
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_pd(double *dp, __m128d a)
+{
+ __builtin_ia32_storeupd(dp, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storer_pd(double *dp, __m128d a)
+{
+ dp[0] = a[1];
+ dp[1] = a[0];
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storeh_pd(double *dp, __m128d a)
+{
+ dp[0] = a[1];
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storel_pd(double *dp, __m128d a)
+{
+ dp[0] = a[0];
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)((__v16qi)a + (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a + (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a + (__v4si)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_si64(__m64 a, __m64 b)
+{
+ return a + b;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi64(__m128i a, __m128i b)
+{
+ return a + b;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddsb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddusb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddusw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_avg_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_avg_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_madd_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaxub128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pminsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pminub128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mullo_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmullw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_su32(__m64 a, __m64 b)
+{
+ return __builtin_ia32_pmuludq((__v2si)a, (__v2si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mul_epu32(__m128i a, __m128i b)
+{
+ return __builtin_ia32_pmuludq128((__v4si)a, (__v4si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sad_epu8(__m128i a, __m128i b)
+{
+ return __builtin_ia32_psadbw128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)((__v16qi)a - (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a - (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a - (__v4si)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_si64(__m64 a, __m64 b)
+{
+ return a - b;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi64(__m128i a, __m128i b)
+{
+ return a - b;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubsb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubusb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_and_si128(__m128i a, __m128i b)
+{
+ return __builtin_ia32_pand128(a, b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_si128(__m128i a, __m128i b)
+{
+ return __builtin_ia32_pandn128(a, b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_or_si128(__m128i a, __m128i b)
+{
+ return __builtin_ia32_por128(a, b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_xor_si128(__m128i a, __m128i b)
+{
+ return __builtin_ia32_pxor128(a, b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_si128(__m128i a, int imm)
+{
+ return __builtin_ia32_pslldqi128(a, imm * 8);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psllwi128((__v8hi)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)a, (__v8hi)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_pslldi128((__v4si)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)a, (__v4si)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi64(__m128i a, int count)
+{
+ return __builtin_ia32_psllqi128(a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi64(__m128i a, __m128i count)
+{
+ return __builtin_ia32_psllq128(a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srai_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrawi128((__v8hi)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sra_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psraw128((__v8hi)a, (__v8hi)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srai_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psradi128((__v4si)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sra_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_si128(__m128i a, int imm)
+{
+ return __builtin_ia32_psrldqi128(a, imm * 8);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrlwi128((__v8hi)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrlw128((__v8hi)a, (__v8hi)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrldi128((__v4si)a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrld128((__v4si)a, (__v4si)count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi64(__m128i a, int count)
+{
+ return __builtin_ia32_psrlqi128(a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi64(__m128i a, __m128i count)
+{
+ return __builtin_ia32_psrlq128(a, count);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpeqb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpeqw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpeqd128((__v4si)a, (__v4si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtd128((__v4si)a, (__v4si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtb128((__v16qi)b, (__v16qi)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtw128((__v8hi)b, (__v8hi)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pcmpgtd128((__v4si)b, (__v4si)a);
+}
+
+#ifdef __x86_64__
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_sd(__m128d a, long long b)
+{
+ return __builtin_ia32_cvtsi642sd(a, b);
+}
+
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_si64(__m128d a)
+{
+ return __builtin_ia32_cvtsd2si64(a);
+}
+
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvttsd_si64(__m128d a)
+{
+ return __builtin_ia32_cvttsd2si64(a);
+}
+#endif
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi32_ps(__m128i a)
+{
+ return __builtin_ia32_cvtdq2ps((__v4si)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_epi32(__m128 a)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq(a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvttps_epi32(__m128 a)
+{
+ return (__m128i)__builtin_ia32_cvttps2dq(a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_si128(int a)
+{
+ return (__m128i)(__v4si){ a, 0, 0, 0 };
+}
+
+#ifdef __x86_64__
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_si128(long long a)
+{
+ return (__m128i){ a, 0 };
+}
+#endif
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi128_si32(__m128i a)
+{
+ __v4si b = (__v4si)a;
+ return b[0];
+}
+
+#ifdef __x86_64__
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi128_si64(__m128i a)
+{
+ return a[0];
+}
+#endif
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_load_si128(__m128i const *p)
+{
+ return *p;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_si128(__m128i const *p)
+{
+ return (__m128i)__builtin_ia32_loaddqu((char const *)p);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_epi64(__m128i const *p)
+{
+ return (__m128i)__builtin_ia32_loadlv4si((__v2si *)p);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi64(__m64 q1, __m64 q0)
+{
+ return (__m128i){ (long long)q0, (long long)q1 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi32(int i3, int i2, int i1, int i0)
+{
+ return (__m128i)(__v4si){ i0, i1, i2, i3};
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
+{
+ return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
+{
+ return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi64(__m64 q)
+{
+ return (__m128i){ (long long)q, (long long)q };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi32(int i)
+{
+ return (__m128i)(__v4si){ i, i, i, i };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi16(short w)
+{
+ return (__m128i)(__v8hi){ w, w, w, w, w, w, w, w };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi8(char b)
+{
+ return (__m128i)(__v16qi){ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi64(__m64 q0, __m64 q1)
+{
+ return (__m128i){ (long long)q0, (long long)q1 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi32(int i0, int i1, int i2, int i3)
+{
+ return (__m128i)(__v4si){ i0, i1, i2, i3};
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
+{
+ return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
+{
+ return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_si128(void)
+{
+ return (__m128i){ 0LL, 0LL };
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store_si128(__m128i *p, __m128i b)
+{
+ *p = b;
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_si128(__m128i *p, __m128i b)
+{
+ __builtin_ia32_storedqu((char *)p, (__v16qi)b);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskmoveu_si128(__m128i d, __m128i n, char *p)
+{
+ __builtin_ia32_maskmovdqu((__v16qi)d, (__v16qi)n, p);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storel_epi64(__m128i *p, __m128i a)
+{
+ __builtin_ia32_storelv4si((__v2si *)p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_pd(double *p, __m128d a)
+{
+ __builtin_ia32_movntpd(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_si128(__m128i *p, __m128i a)
+{
+ __builtin_ia32_movntdq(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_si32(int *p, int a)
+{
+ __builtin_ia32_movnti(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_clflush(void const *p)
+{
+ __builtin_ia32_clflush(p);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_lfence(void)
+{
+ __builtin_ia32_lfence();
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_mfence(void)
+{
+ __builtin_ia32_mfence();
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packsswb128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packs_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packssdw128((__v4si)a, (__v4si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packus_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packuswb128((__v8hi)a, (__v8hi)b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_extract_epi16(__m128i a, int imm)
+{
+ __v8hi b = (__v8hi)a;
+ return b[imm];
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_insert_epi16(__m128i a, int b, int imm)
+{
+ return (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)a, b, imm);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_epi8(__m128i a)
+{
+ return __builtin_ia32_pmovmskb128((__v16qi)a);
+}
+
+#define _mm_shuffle_epi32(a, imm) ((__m128i)__builtin_ia32_pshufd((__v4si)(a), (imm)))
+#define _mm_shufflehi_epi16(a, imm) ((__m128i)__builtin_ia32_pshufhw((__v8hi)(a), (imm)))
+#define _mm_shufflelo_epi16(a, imm) ((__m128i)__builtin_ia32_pshuflw((__v8hi)(a), (imm)))
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 2, 4+2, 3, 4+3);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi64(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector(a, b, 1, 2+1);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 0, 4+0, 1, 4+1);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi64(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector(a, b, 0, 2+0);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_movepi64_pi64(__m128i a)
+{
+ return (__m64)a[0];
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_movpi64_epi64(__m64 a)
+{
+ return (__m128i){ (long long)a, 0 };
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_move_epi64(__m128i a)
+{
+ return (__m128i){ a[0], 0 };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pd(__m128d a, __m128d b)
+{
+ return __builtin_shufflevector(a, b, 1, 2+1);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pd(__m128d a, __m128d b)
+{
+ return __builtin_shufflevector(a, b, 0, 2+0);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_pd(__m128d a)
+{
+ return __builtin_ia32_movmskpd(a);
+}
+
+#define _mm_shuffle_pd(a, b, i) (__builtin_ia32_shufpd((a), (b), (i)))
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_castpd_ps(__m128d in)
+{
+ return (__m128)in;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_castpd_si128(__m128d in)
+{
+ return (__m128i)in;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_castps_pd(__m128 in)
+{
+ return (__m128d)in;
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_castps_si128(__m128 in)
+{
+ return (__m128i)in;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_castsi128_ps(__m128i in)
+{
+ return (__m128)in;
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_castsi128_pd(__m128i in)
+{
+ return (__m128d)in;
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_pause(void)
+{
+ __asm__ volatile ("pause");
+}
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+
+#endif /* __SSE2__ */
+
+#endif /* __EMMINTRIN_H */
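
A small, self-contained usage sketch for a few of the SSE2 intrinsics defined above. It assumes the translation unit is built with SSE2 enabled (e.g. -msse2); the data values are arbitrary.

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  short a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  short b[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
  short r[8];

  __m128i va = _mm_loadu_si128((const __m128i *)a);  // unaligned 128-bit loads
  __m128i vb = _mm_loadu_si128((const __m128i *)b);
  __m128i vr = _mm_add_epi16(va, vb);                // element-wise 16-bit add
  _mm_storeu_si128((__m128i *)r, vr);

  for (int i = 0; i != 8; ++i)
    printf("%d ", r[i]);                             // prints: 11 22 33 44 55 66 77 88
  printf("\n");
  return 0;
}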
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
new file mode 100644
index 0000000..28fb882
--- /dev/null
+++ b/lib/Headers/float.h
@@ -0,0 +1,71 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __FLOAT_H
+#define __FLOAT_H
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#define DECIMAL_DIG __DECIMAL_DIG__
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#endif /* __FLOAT_H */
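
One way to read these macros: FLT_EPSILON and DBL_EPSILON are the gap between 1.0 and the next representable value, so a relative floating-point comparison can be scaled by them. A minimal sketch; the slack factor of 4 is an arbitrary choice for illustration.

#include <float.h>
#include <math.h>
#include <stdio.h>

/* Roughly-equal comparison with a tolerance scaled by DBL_EPSILON and the
   magnitude of the operands. */
static int nearly_equal(double x, double y) {
  double scale = fmax(fabs(x), fabs(y));
  return fabs(x - y) <= 4.0 * DBL_EPSILON * scale;
}

int main(void) {
  printf("%d\n", nearly_equal(0.1 + 0.2, 0.3));  /* prints 1 */
  return 0;
}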
diff --git a/lib/Headers/iso646.h b/lib/Headers/iso646.h
new file mode 100644
index 0000000..dca13c5
--- /dev/null
+++ b/lib/Headers/iso646.h
@@ -0,0 +1,43 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators---===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
+#endif /* __ISO646_H */
diff --git a/lib/Headers/limits.h b/lib/Headers/limits.h
new file mode 100644
index 0000000..e4909ab
--- /dev/null
+++ b/lib/Headers/limits.h
@@ -0,0 +1,114 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+ Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>. */
+#include_next <limits.h>
+
+/* Many system headers try to "help us out" by defining these. No really, we
+ know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef MB_LEN_MAX
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN (-__SHRT_MAX__ -1)
+#define INT_MIN (-__INT_MAX__ -1)
+#define LONG_MIN (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
+#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#define UINT_MAX (__INT_MAX__ *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
+
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long. */
+#if __STDC_VERSION__ >= 199901
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+ that we don't have something like #pragma poison that could be used to
+ deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
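
These limits are most often used to guard arithmetic against overflow before it happens. A small sketch, assuming both operands are non-negative ints:

#include <limits.h>
#include <stdio.h>

/* Add two non-negative ints, reporting overflow instead of invoking it. */
static int checked_add(int a, int b, int *out) {
  if (a > INT_MAX - b)
    return 0;                 /* would overflow */
  *out = a + b;
  return 1;
}

int main(void) {
  int sum;
  if (checked_add(INT_MAX - 1, 5, &sum))
    printf("%d\n", sum);
  else
    printf("overflow\n");     /* this branch is taken */
  return 0;
}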
diff --git a/lib/Headers/mm_malloc.h b/lib/Headers/mm_malloc.h
new file mode 100644
index 0000000..a680c47
--- /dev/null
+++ b/lib/Headers/mm_malloc.h
@@ -0,0 +1,59 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <errno.h>
+#include <stdlib.h>
+
+static inline void *__attribute__((__always_inline__, __nodebug__)) _mm_malloc(size_t size, size_t align)
+{
+ if (align & (align - 1)) {
+ errno = EINVAL;
+ return 0;
+ }
+
+ if (!size)
+ return 0;
+
+ if (align < 2 * sizeof(void *))
+ align = 2 * sizeof(void *);
+
+ void *mallocedMemory = malloc(size + align);
+ if (!mallocedMemory)
+ return 0;
+
+ void *alignedMemory = (void *)(((size_t)mallocedMemory + align) & ~((size_t)align - 1));
+ ((void **)alignedMemory)[-1] = mallocedMemory;
+
+ return alignedMemory;
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__)) _mm_free(void *p)
+{
+ if (p)
+ free(((void **)p)[-1]);
+}
+
+#endif /* __MM_MALLOC_H */
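
A usage sketch: the allocator above over-allocates by the alignment, rounds the pointer up, and stashes the original malloc pointer in the slot just before the aligned block, which is why the matching _mm_free must be used instead of free. The buffer size below is arbitrary.

#include <mm_malloc.h>
#include <stdio.h>
#include <stdint.h>

int main(void) {
  /* 64 doubles, 16-byte aligned so they can back __m128d loads and stores. */
  double *buf = (double *)_mm_malloc(64 * sizeof(double), 16);
  if (!buf)
    return 1;
  printf("aligned: %d\n", (int)(((uintptr_t)buf & 15) == 0));  /* prints 1 */
  _mm_free(buf);   /* calling free() on this pointer would be wrong */
  return 0;
}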
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
new file mode 100644
index 0000000..339d212
--- /dev/null
+++ b/lib/Headers/mmintrin.h
@@ -0,0 +1,449 @@
+/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MMINTRIN_H
+#define __MMINTRIN_H
+
+#ifndef __MMX__
+#error "MMX instruction set not enabled"
+#else
+
+typedef long long __m64 __attribute__((__vector_size__(8)));
+
+typedef int __v2si __attribute__((__vector_size__(8)));
+typedef short __v4hi __attribute__((__vector_size__(8)));
+typedef char __v8qi __attribute__((__vector_size__(8)));
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_empty(void)
+{
+ __builtin_ia32_emms();
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_si64(int __i)
+{
+ return (__m64)(__v2si){__i, 0};
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_si32(__m64 __m)
+{
+ __v2si __mmx_var2 = (__v2si)__m;
+ return __mmx_var2[0];
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_m64(long long __i)
+{
+ return (__m64)__i;
+}
+
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtm64_si64(__m64 __m)
+{
+ return (long long)__m;
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 8+4, 5,
+ 8+5, 6, 8+6, 7, 8+7);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 4+2, 3,
+ 4+3);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 2+1);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8+0, 1,
+ 8+1, 2, 8+2, 3, 8+3);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4+0, 1,
+ 4+1);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2+0);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v8qi)__m1 + (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v4hi)__m1 + (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v2si)__m1 + (__v2si)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v8qi)__m1 - (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v4hi)__m1 - (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v2si)__m1 - (__v2si)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)((__v4hi)__m1 * (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_si64(__m64 __m, __m64 __count)
+{
+ return __builtin_ia32_psllq(__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_si64(__m64 __m, int __count)
+{
+ return __builtin_ia32_psllqi(__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sra_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srai_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sra_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srai_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlq(__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_si64(__m64 __m, int __count)
+{
+ return __builtin_ia32_psrlqi(__m, __count);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ return __m1 & __m2;
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ return ~__m1 & __m2;
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ return __m1 | __m2;
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ return __m1 ^ __m2;
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_si64(void)
+{
+ return (__m64){ 0LL };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi32(int __i1, int __i0)
+{
+ return (__m64)(__v2si){ __i0, __i1 };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)(__v4hi){ __s0, __s1, __s2, __s3 };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)(__v8qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7 };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi32(int __i)
+{
+ return (__m64)(__v2si){ __i, __i };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi16(short __s)
+{
+ return (__m64)(__v4hi){ __s, __s, __s, __s };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi8(char __b)
+{
+ return (__m64)(__v8qi){ __b, __b, __b, __b, __b, __b, __b, __b };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi32(int __i1, int __i0)
+{
+ return (__m64)(__v2si){ __i1, __i0 };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)(__v4hi){ __s3, __s2, __s1, __s0 };
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)(__v8qi){ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0 };
+}
+
+#endif /* __MMX__ */
+
+#endif /* __MMINTRIN_H */
+
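
A small sketch (illustrative, not part of the patch) showing how the MMX wrappers above are meant to be used; it assumes the file is compiled with MMX enabled (e.g. -mmmx) on an x86 target.

#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    __m64 a = _mm_set_pi16(4, 3, 2, 1);     /* lanes, low to high: 1, 2, 3, 4 */
    __m64 b = _mm_set_pi16(40, 30, 20, 10); /* lanes: 10, 20, 30, 40 */
    __m64 sum = _mm_add_pi16(a, b);         /* packed 16-bit addition */

    short out[4];
    memcpy(out, &sum, sizeof(out));

    _mm_empty();                            /* clear MMX state before x87 use */
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  /* 11 22 33 44 */
    return 0;
}
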
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
new file mode 100644
index 0000000..cd90166
--- /dev/null
+++ b/lib/Headers/pmmintrin.h
@@ -0,0 +1,121 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#ifndef __SSE3__
+#error "SSE3 instruction set not enabled"
+#else
+
+#include <emmintrin.h>
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_lddqu_si128(__m128i const *p)
+{
+ return (__m128i)__builtin_ia32_lddqu((char const *)p);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_addsub_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_addsubps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_haddps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_hsubps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movehdup_ps(__m128 a)
+{
+ return __builtin_shufflevector(a, a, 1, 1, 3, 3);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_moveldup_ps(__m128 a)
+{
+ return __builtin_shufflevector(a, a, 0, 0, 2, 2);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_addsub_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_addsubpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_haddpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_hsubpd(a, b);
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loaddup_pd(double const *dp)
+{
+ return (__m128d){ *dp, *dp };
+}
+
+static inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_movedup_pd(__m128d a)
+{
+ return __builtin_shufflevector(a, a, 0, 0);
+}
+
+#define _MM_DENORMALS_ZERO_ON (0x0040)
+#define _MM_DENORMALS_ZERO_OFF (0x0000)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_monitor(void const *p, unsigned extensions, unsigned hints)
+{
+ __builtin_ia32_monitor((void *)p, extensions, hints);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_mwait(unsigned extensions, unsigned hints)
+{
+ __builtin_ia32_mwait(extensions, hints);
+}
+
+#endif /* __SSE3__ */
+
+#endif /* __PMMINTRIN_H */
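
An illustrative sketch (not part of the patch) for the SSE3 horizontal-add wrapper above, assuming -msse3; _mm_set_ps and _mm_storeu_ps come from the <xmmintrin.h> pulled in through <emmintrin.h>.

#include <pmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* lanes: 1, 2, 3, 4 */
    __m128 b = _mm_set_ps(8.0f, 7.0f, 6.0f, 5.0f);   /* lanes: 5, 6, 7, 8 */

    /* haddps: { a0+a1, a2+a3, b0+b1, b2+b3 } = { 3, 7, 11, 15 } */
    __m128 h = _mm_hadd_ps(a, b);

    float out[4];
    _mm_storeu_ps(out, h);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}
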
diff --git a/lib/Headers/stdarg.h b/lib/Headers/stdarg.h
new file mode 100644
index 0000000..c436ced
--- /dev/null
+++ b/lib/Headers/stdarg.h
@@ -0,0 +1,47 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+typedef __builtin_va_list va_list;
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+/* GCC always defines __va_copy, but only defines va_copy when compiling in
+ * C99 mode or when -ansi is not specified, since va_copy was not part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199900L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+/* Hack required to make standard headers work, at least on Ubuntu */
+#define __GNUC_VA_LIST 1
+typedef __builtin_va_list __gnuc_va_list;
+
+#endif /* __STDARG_H */
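
A short usage sketch (illustrative, not part of the patch) exercising the va_* macros defined above.

#include <stdarg.h>
#include <stdio.h>

/* Sum `count` int arguments passed through the variadic list. */
static int sum_ints(int count, ...)
{
    va_list ap;
    int i, total = 0;

    va_start(ap, count);           /* expands to __builtin_va_start */
    for (i = 0; i < count; ++i)
        total += va_arg(ap, int);  /* each va_arg consumes one argument */
    va_end(ap);

    return total;
}

int main(void)
{
    printf("%d\n", sum_ints(3, 10, 20, 12));   /* prints 42 */
    return 0;
}
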
diff --git a/lib/Headers/stdbool.h b/lib/Headers/stdbool.h
new file mode 100644
index 0000000..e44a1f9
--- /dev/null
+++ b/lib/Headers/stdbool.h
@@ -0,0 +1,38 @@
+/*===---- stdbool.h - Standard header for booleans -------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDBOOL_H
+#define __STDBOOL_H
+
+/* Don't define bool, true, and false in C++ */
+#ifndef __cplusplus
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+#define __bool_true_false_are_defined 1
+
+#endif /* __STDBOOL_H */
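
A trivial sketch (not part of the patch): in C the macros above make bool, true, and false spell _Bool, 1, and 0.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    bool ready = true;             /* expands to: _Bool ready = 1; */
    if (ready && __bool_true_false_are_defined)
        printf("stdbool.h is active\n");
    return 0;
}
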
diff --git a/lib/Headers/stddef.h b/lib/Headers/stddef.h
new file mode 100644
index 0000000..2c84b4b
--- /dev/null
+++ b/lib/Headers/stddef.h
@@ -0,0 +1,43 @@
+/*===---- stddef.h - Basic type definitions --------------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDDEF_H
+#define __STDDEF_H
+
+typedef __typeof__(((int*)0)-((int*)0)) ptrdiff_t;
+typedef __typeof__(sizeof(int)) size_t;
+#ifndef __cplusplus
+typedef __typeof__(*L"") wchar_t;
+#endif
+
+#ifdef __cplusplus
+#define NULL __null
+#else
+#define NULL ((void*)0)
+#endif
+
+#define offsetof(t, d) __builtin_offsetof(t, d)
+
+#endif /* __STDDEF_H */
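
A small sketch (illustrative, not part of the patch) of the offsetof macro above, which lowers to __builtin_offsetof; the struct is an arbitrary example.

#include <stddef.h>
#include <stdio.h>

struct packet {
    char   tag;
    int    length;
    double payload;
};

int main(void)
{
    /* Byte offset of each member inside struct packet, plus its total size. */
    printf("tag=%zu length=%zu payload=%zu size=%zu\n",
           offsetof(struct packet, tag),
           offsetof(struct packet, length),
           offsetof(struct packet, payload),
           sizeof(struct packet));
    return 0;
}
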
diff --git a/lib/Headers/stdint.h b/lib/Headers/stdint.h
new file mode 100644
index 0000000..a7020d8
--- /dev/null
+++ b/lib/Headers/stdint.h
@@ -0,0 +1,232 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+#define __CLANG_STDINT_H
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__
+# include_next <stdint.h>
+#else
+
+/* We currently only support targets with power-of-two, two's-complement integers.
+ */
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ * Since we only support pow-2 targets, these map directly to exact width types.
+ */
+
+#ifndef __int8_t_defined /* glibc does weird things with sys/types.h */
+#define __int8_t_defined
+typedef signed __INT8_TYPE__ int8_t;
+typedef __INT16_TYPE__ int16_t;
+typedef __INT32_TYPE__ int32_t;
+#ifdef __INT64_TYPE__
+typedef __INT64_TYPE__ int64_t;
+#endif
+#endif
+
+typedef unsigned __INT8_TYPE__ uint8_t;
+typedef int8_t int_least8_t;
+typedef uint8_t uint_least8_t;
+typedef int8_t int_fast8_t;
+typedef uint8_t uint_fast8_t;
+
+typedef unsigned __INT16_TYPE__ uint16_t;
+typedef int16_t int_least16_t;
+typedef uint16_t uint_least16_t;
+typedef int16_t int_fast16_t;
+typedef uint16_t uint_fast16_t;
+
+#ifndef __uint32_t_defined /* more glibc compatibility */
+#define __uint32_t_defined
+typedef unsigned __INT32_TYPE__ uint32_t;
+#endif
+typedef int32_t int_least32_t;
+typedef uint32_t uint_least32_t;
+typedef int32_t int_fast32_t;
+typedef uint32_t uint_fast32_t;
+
+/* Some 16-bit targets do not have a 64-bit datatype. Only define the 64-bit
+ * typedefs if there is something to typedef them to.
+ */
+#ifdef __INT64_TYPE__
+typedef unsigned __INT64_TYPE__ uint64_t;
+typedef int64_t int_least64_t;
+typedef uint64_t uint_least64_t;
+typedef int64_t int_fast64_t;
+typedef uint64_t uint_fast64_t;
+#endif
+
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#ifndef __intptr_t_defined
+typedef __INTPTR_TYPE__ intptr_t;
+#define __intptr_t_defined
+#endif
+typedef unsigned __INTPTR_TYPE__ uintptr_t;
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * Fixed-width types have fixed max/min values.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * Since we map these directly onto fixed-size types, these values are the same.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define INT8_MAX 127
+#define INT8_MIN (-128)
+#define UINT8_MAX 255
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+
+#define INT16_MAX 32767
+#define INT16_MIN (-32768)
+#define UINT16_MAX 65535
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+
+#define INT32_MAX 2147483647
+#define INT32_MIN (-2147483647-1)
+#define UINT32_MAX 4294967295U
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+
+/* If we do not have 64-bit support, don't define the 64-bit limit macros. */
+#ifdef __INT64_TYPE__
+#define INT64_MAX 9223372036854775807LL
+#define INT64_MIN (-9223372036854775807LL-1)
+#define UINT64_MAX 18446744073709551615ULL
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+#endif
+
+/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
+/* C99 7.18.3 Limits of other integer types. */
+
+#if __POINTER_WIDTH__ == 64
+
+#define INTPTR_MIN INT64_MIN
+#define INTPTR_MAX INT64_MAX
+#define UINTPTR_MAX UINT64_MAX
+#define PTRDIFF_MIN INT64_MIN
+#define PTRDIFF_MAX INT64_MAX
+#define SIZE_MAX UINT64_MAX
+
+#elif __POINTER_WIDTH__ == 32
+
+#define INTPTR_MIN INT32_MIN
+#define INTPTR_MAX INT32_MAX
+#define UINTPTR_MAX UINT32_MAX
+#define PTRDIFF_MIN INT32_MIN
+#define PTRDIFF_MAX INT32_MAX
+#define SIZE_MAX UINT32_MAX
+
+#elif __POINTER_WIDTH__ == 16
+
+#define INTPTR_MIN INT16_MIN
+#define INTPTR_MAX INT16_MAX
+#define UINTPTR_MAX UINT16_MAX
+#define PTRDIFF_MIN INT16_MIN
+#define PTRDIFF_MAX INT16_MAX
+#define SIZE_MAX UINT16_MAX
+
+#else
+#error "unknown or unset pointer width!"
+#endif
+
+/* C99 7.18.2.5 Limits of greatest-width integer types. */
+#define INTMAX_MIN (-__INTMAX_MAX__-1)
+#define INTMAX_MAX __INTMAX_MAX__
+#define UINTMAX_MAX (__INTMAX_MAX__*2ULL+1ULL)
+
+/* C99 7.18.3 Limits of other integer types. */
+#define SIG_ATOMIC_MIN INT32_MIN
+#define SIG_ATOMIC_MAX INT32_MAX
+#define WINT_MIN INT32_MIN
+#define WINT_MAX INT32_MAX
+
+/* FIXME: if we ever support a target with unsigned wchar_t, this should be
+ * 0 .. Max.
+ */
+#ifndef WCHAR_MAX
+#define WCHAR_MAX __WCHAR_MAX__
+#endif
+#ifndef WCHAR_MIN
+#define WCHAR_MIN (-__WCHAR_MAX__-1)
+#endif
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define INT8_C(v) (v)
+#define UINT8_C(v) (v##U)
+#define INT16_C(v) (v)
+#define UINT16_C(v) (v##U)
+#define INT32_C(v) (v)
+#define UINT32_C(v) (v##U)
+
+/* Only define the 64-bit constant macros if we have 64-bit support. */
+#ifdef __INT64_TYPE__
+#define INT64_C(v) (v##LL)
+#define UINT64_C(v) (v##ULL)
+#endif
+
+/* 7.18.4.2 Macros for greatest-width integer constants. */
+#define INTMAX_C(v) (v##LL)
+#define UINTMAX_C(v) (v##ULL)
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDINT_H */
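
A small sketch (illustrative, not part of the patch) using the exact-width typedefs and constant macros above; on a hosted build the same names come from the system <stdint.h> via include_next.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t mask = UINT32_C(0xFF000000);   /* expands to 0xFF000000U */
    uint64_t wide = UINT64_C(1) << 40;      /* 64-bit value before the shift */

    printf("mask=%lu wide=%llu\n",
           (unsigned long)mask, (unsigned long long)wide);
    printf("INT32_MAX=%d UINTPTR_MAX=%llu\n",
           INT32_MAX, (unsigned long long)UINTPTR_MAX);
    return 0;
}
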
diff --git a/lib/Headers/tgmath.h b/lib/Headers/tgmath.h
new file mode 100644
index 0000000..e1a0023
--- /dev/null
+++ b/lib/Headers/tgmath.h
@@ -0,0 +1,1358 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Copyright (c) 2009 Howard Hinnant
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __TGMATH_H
+#define __TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+ __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y) + \
+ __tg_promote(__z)))
+
+// acos
+
+static float
+ _TG_ATTRS
+ __tg_acos(float __x) {return acosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acos(double __x) {return acos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
+// asin
+
+static float
+ _TG_ATTRS
+ __tg_asin(float __x) {return asinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asin(double __x) {return asin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asin(long double __x) {return asinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asin(float _Complex __x) {return casinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asin(double _Complex __x) {return casin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asin(long double _Complex __x) {return casinl(__x);}
+
+#undef asin
+#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))
+
+// atan
+
+static float
+ _TG_ATTRS
+ __tg_atan(float __x) {return atanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atan(double __x) {return atan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan(long double __x) {return atanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atan(float _Complex __x) {return catanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atan(double _Complex __x) {return catan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atan(long double _Complex __x) {return catanl(__x);}
+
+#undef atan
+#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))
+
+// acosh
+
+static float
+ _TG_ATTRS
+ __tg_acosh(float __x) {return acoshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acosh(double __x) {return acosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acosh(long double __x) {return acoshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acosh(float _Complex __x) {return cacoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acosh(double _Complex __x) {return cacosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acosh(long double _Complex __x) {return cacoshl(__x);}
+
+#undef acosh
+#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))
+
+// asinh
+
+static float
+ _TG_ATTRS
+ __tg_asinh(float __x) {return asinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asinh(double __x) {return asinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asinh(long double __x) {return asinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asinh(float _Complex __x) {return casinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asinh(double _Complex __x) {return casinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asinh(long double _Complex __x) {return casinhl(__x);}
+
+#undef asinh
+#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))
+
+// atanh
+
+static float
+ _TG_ATTRS
+ __tg_atanh(float __x) {return atanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atanh(double __x) {return atanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atanh(long double __x) {return atanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atanh(float _Complex __x) {return catanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atanh(double _Complex __x) {return catanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atanh(long double _Complex __x) {return catanhl(__x);}
+
+#undef atanh
+#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))
+
+// cos
+
+static float
+ _TG_ATTRS
+ __tg_cos(float __x) {return cosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cos(double __x) {return cos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cos(long double __x) {return cosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cos(float _Complex __x) {return ccosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cos(double _Complex __x) {return ccos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cos(long double _Complex __x) {return ccosl(__x);}
+
+#undef cos
+#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))
+
+// sin
+
+static float
+ _TG_ATTRS
+ __tg_sin(float __x) {return sinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sin(double __x) {return sin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sin(long double __x) {return sinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sin(float _Complex __x) {return csinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sin(double _Complex __x) {return csin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sin(long double _Complex __x) {return csinl(__x);}
+
+#undef sin
+#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))
+
+// tan
+
+static float
+ _TG_ATTRS
+ __tg_tan(float __x) {return tanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tan(double __x) {return tan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tan(long double __x) {return tanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tan(float _Complex __x) {return ctanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tan(double _Complex __x) {return ctan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tan(long double _Complex __x) {return ctanl(__x);}
+
+#undef tan
+#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))
+
+// cosh
+
+static float
+ _TG_ATTRS
+ __tg_cosh(float __x) {return coshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cosh(double __x) {return cosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cosh(long double __x) {return coshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cosh(float _Complex __x) {return ccoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cosh(double _Complex __x) {return ccosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cosh(long double _Complex __x) {return ccoshl(__x);}
+
+#undef cosh
+#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))
+
+// sinh
+
+static float
+ _TG_ATTRS
+ __tg_sinh(float __x) {return sinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sinh(double __x) {return sinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sinh(long double __x) {return sinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sinh(float _Complex __x) {return csinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sinh(double _Complex __x) {return csinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sinh(long double _Complex __x) {return csinhl(__x);}
+
+#undef sinh
+#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))
+
+// tanh
+
+static float
+ _TG_ATTRS
+ __tg_tanh(float __x) {return tanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tanh(double __x) {return tanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tanh(long double __x) {return tanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tanh(float _Complex __x) {return ctanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tanh(double _Complex __x) {return ctanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tanh(long double _Complex __x) {return ctanhl(__x);}
+
+#undef tanh
+#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))
+
+// exp
+
+static float
+ _TG_ATTRS
+ __tg_exp(float __x) {return expf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp(double __x) {return exp(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp(long double __x) {return expl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_exp(float _Complex __x) {return cexpf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_exp(double _Complex __x) {return cexp(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_exp(long double _Complex __x) {return cexpl(__x);}
+
+#undef exp
+#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))
+
+// log
+
+static float
+ _TG_ATTRS
+ __tg_log(float __x) {return logf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log(double __x) {return log(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log(long double __x) {return logl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_log(float _Complex __x) {return clogf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_log(double _Complex __x) {return clog(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_log(long double _Complex __x) {return clogl(__x);}
+
+#undef log
+#define log(__x) __tg_log(__tg_promote1((__x))(__x))
+
+// pow
+
+static float
+ _TG_ATTRS
+ __tg_pow(float __x, float __y) {return powf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_pow(double __x, double __y) {return pow(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_pow(long double __x, long double __y) {return powl(__x, __y);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_pow(long double _Complex __x, long double _Complex __y)
+ {return cpowl(__x, __y);}
+
+#undef pow
+#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// sqrt
+
+static float
+ _TG_ATTRS
+ __tg_sqrt(float __x) {return sqrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sqrt(double __x) {return sqrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sqrt(long double __x) {return sqrtl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sqrt(float _Complex __x) {return csqrtf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sqrt(double _Complex __x) {return csqrt(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}
+
+#undef sqrt
+#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))
+
+// fabs
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float __x) {return fabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double __x) {return fabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double __x) {return fabsl(__x);}
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float _Complex __x) {return cabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double _Complex __x) {return cabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double _Complex __x) {return cabsl(__x);}
+
+#undef fabs
+#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))
+
+// atan2
+
+static float
+ _TG_ATTRS
+ __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_atan2(double __x, double __y) {return atan2(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}
+
+#undef atan2
+#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// cbrt
+
+static float
+ _TG_ATTRS
+ __tg_cbrt(float __x) {return cbrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cbrt(double __x) {return cbrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cbrt(long double __x) {return cbrtl(__x);}
+
+#undef cbrt
+#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))
+
+// ceil
+
+static float
+ _TG_ATTRS
+ __tg_ceil(float __x) {return ceilf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_ceil(double __x) {return ceil(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_ceil(long double __x) {return ceill(__x);}
+
+#undef ceil
+#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))
+
+// copysign
+
+static float
+ _TG_ATTRS
+ __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_copysign(double __x, double __y) {return copysign(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}
+
+#undef copysign
+#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// erf
+
+static float
+ _TG_ATTRS
+ __tg_erf(float __x) {return erff(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erf(double __x) {return erf(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erf(long double __x) {return erfl(__x);}
+
+#undef erf
+#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))
+
+// erfc
+
+static float
+ _TG_ATTRS
+ __tg_erfc(float __x) {return erfcf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erfc(double __x) {return erfc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erfc(long double __x) {return erfcl(__x);}
+
+#undef erfc
+#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))
+
+// exp2
+
+static float
+ _TG_ATTRS
+ __tg_exp2(float __x) {return exp2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp2(double __x) {return exp2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp2(long double __x) {return exp2l(__x);}
+
+#undef exp2
+#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))
+
+// expm1
+
+static float
+ _TG_ATTRS
+ __tg_expm1(float __x) {return expm1f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_expm1(double __x) {return expm1(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_expm1(long double __x) {return expm1l(__x);}
+
+#undef expm1
+#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))
+
+// fdim
+
+static float
+ _TG_ATTRS
+ __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fdim(double __x, double __y) {return fdim(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}
+
+#undef fdim
+#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// floor
+
+static float
+ _TG_ATTRS
+ __tg_floor(float __x) {return floorf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_floor(double __x) {return floor(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_floor(long double __x) {return floorl(__x);}
+
+#undef floor
+#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))
+
+// fma
+
+static float
+ _TG_ATTRS
+ __tg_fma(float __x, float __y, float __z)
+ {return fmaf(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_fma(double __x, double __y, double __z)
+ {return fma(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+ __tg_fma(long double __x,long double __y, long double __z)
+ {return fmal(__x, __y, __z);}
+
+#undef fma
+#define fma(__x, __y, __z) \
+ __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \
+ __tg_promote3((__x), (__y), (__z))(__y), \
+ __tg_promote3((__x), (__y), (__z))(__z))
+
+// fmax
+
+static float
+ _TG_ATTRS
+ __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmax(double __x, double __y) {return fmax(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}
+
+#undef fmax
+#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmin
+
+static float
+ _TG_ATTRS
+ __tg_fmin(float __x, float __y) {return fminf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmin(double __x, double __y) {return fmin(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}
+
+#undef fmin
+#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmod
+
+static float
+ _TG_ATTRS
+ __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmod(double __x, double __y) {return fmod(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}
+
+#undef fmod
+#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// frexp
+
+static float
+ _TG_ATTRS
+ __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}
+
+#undef frexp
+#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)
+
+// hypot
+
+static float
+ _TG_ATTRS
+ __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_hypot(double __x, double __y) {return hypot(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}
+
+#undef hypot
+#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// ilogb
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(float __x) {return ilogbf(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(double __x) {return ilogb(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(long double __x) {return ilogbl(__x);}
+
+#undef ilogb
+#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))
+
+// ldexp
+
+static float
+ _TG_ATTRS
+ __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}
+
+#undef ldexp
+#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)
+
+// lgamma
+
+static float
+ _TG_ATTRS
+ __tg_lgamma(float __x) {return lgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_lgamma(double __x) {return lgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_lgamma(long double __x) {return lgammal(__x);}
+
+#undef lgamma
+#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))
+
+// llrint
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(float __x) {return llrintf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(double __x) {return llrint(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(long double __x) {return llrintl(__x);}
+
+#undef llrint
+#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))
+
+// llround
+
+static long long
+ _TG_ATTRS
+ __tg_llround(float __x) {return llroundf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(double __x) {return llround(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(long double __x) {return llroundl(__x);}
+
+#undef llround
+#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))
+
+// log10
+
+static float
+ _TG_ATTRS
+ __tg_log10(float __x) {return log10f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log10(double __x) {return log10(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log10(long double __x) {return log10l(__x);}
+
+#undef log10
+#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))
+
+// log1p
+
+static float
+ _TG_ATTRS
+ __tg_log1p(float __x) {return log1pf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log1p(double __x) {return log1p(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log1p(long double __x) {return log1pl(__x);}
+
+#undef log1p
+#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))
+
+// log2
+
+static float
+ _TG_ATTRS
+ __tg_log2(float __x) {return log2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log2(double __x) {return log2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log2(long double __x) {return log2l(__x);}
+
+#undef log2
+#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+
+// lrint
+
+static long
+ _TG_ATTRS
+ __tg_lrint(float __x) {return lrintf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(double __x) {return lrint(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(long double __x) {return lrintl(__x);}
+
+#undef lrint
+#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))
+
+// lround
+
+static long
+ _TG_ATTRS
+ __tg_lround(float __x) {return lroundf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(double __x) {return lround(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(long double __x) {return lroundl(__x);}
+
+#undef lround
+#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))
+
+// nearbyint
+
+static float
+ _TG_ATTRS
+ __tg_nearbyint(float __x) {return nearbyintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_nearbyint(double __x) {return nearbyint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_nearbyint(long double __x) {return nearbyintl(__x);}
+
+#undef nearbyint
+#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))
+
+// nextafter
+
+static float
+ _TG_ATTRS
+ __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}
+
+#undef nextafter
+#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// nexttoward
+
+static float
+ _TG_ATTRS
+ __tg_nexttoward(float __x, float __y) {return nexttowardf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nexttoward(double __x, double __y) {return nexttoward(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}
+
+#undef nexttoward
+#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remainder
+
+static float
+ _TG_ATTRS
+ __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_remainder(double __x, double __y) {return remainder(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}
+
+#undef remainder
+#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remquo
+
+static float
+ _TG_ATTRS
+ __tg_remquo(float __x, float __y, int* __z)
+ {return remquof(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_remquo(double __x, double __y, int* __z)
+ {return remquo(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+ __tg_remquo(long double __x,long double __y, int* __z)
+ {return remquol(__x, __y, __z);}
+
+#undef remquo
+#define remquo(__x, __y, __z) \
+ __tg_remquo(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y), \
+ (__z))
+
+// rint
+
+static float
+ _TG_ATTRS
+ __tg_rint(float __x) {return rintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_rint(double __x) {return rint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_rint(long double __x) {return rintl(__x);}
+
+#undef rint
+#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))
+
+// round
+
+static float
+ _TG_ATTRS
+ __tg_round(float __x) {return roundf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_round(double __x) {return round(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_round(long double __x) {return roundl(__x);}
+
+#undef round
+#define round(__x) __tg_round(__tg_promote1((__x))(__x))
+
+// scalbn
+
+static float
+ _TG_ATTRS
+ __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}
+
+#undef scalbn
+#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)
+
+// scalbln
+
+static float
+ _TG_ATTRS
+ __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}
+
+#undef scalbln
+#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)
+
+// tgamma
+
+static float
+ _TG_ATTRS
+ __tg_tgamma(float __x) {return tgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tgamma(double __x) {return tgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tgamma(long double __x) {return tgammal(__x);}
+
+#undef tgamma
+#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))
+
+// trunc
+
+static float
+ _TG_ATTRS
+ __tg_trunc(float __x) {return truncf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_trunc(double __x) {return trunc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_trunc(long double __x) {return truncl(__x);}
+
+#undef trunc
+#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))
+
+// carg
+
+static float
+ _TG_ATTRS
+ __tg_carg(float __x) {return atan2f(0.F, __x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double __x) {return atan2(0., __x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double __x) {return atan2l(0.L, __x);}
+
+static float
+ _TG_ATTRS
+ __tg_carg(float _Complex __x) {return cargf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double _Complex __x) {return carg(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double _Complex __x) {return cargl(__x);}
+
+#undef carg
+#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))
+
+// cimag
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float __x) {return 0;}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double __x) {return 0;}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double __x) {return 0;}
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float _Complex __x) {return cimagf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double _Complex __x) {return cimag(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double _Complex __x) {return cimagl(__x);}
+
+#undef cimag
+#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))
+
+// conj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double __x) {return __x;}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float _Complex __x) {return conjf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double _Complex __x) {return conj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double _Complex __x) {return conjl(__x);}
+
+#undef conj
+#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))
+
+// cproj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double __x) {return cprojl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float _Complex __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double _Complex __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double _Complex __x) {return cprojl(__x);}
+
+#undef cproj
+#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))
+
+// creal
+
+static float _Complex
+ _TG_ATTRS
+ __tg_creal(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_creal(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_creal(long double __x) {return __x;}
+
+static float
+ _TG_ATTRS
+ __tg_creal(float _Complex __x) {return crealf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double _Complex __x) {return creal(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double _Complex __x) {return creall(__x);}
+
+#undef creal
+#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))
+
+#undef _TG_ATTRSp
+#undef _TG_ATTRS
+
+#endif /* __cplusplus */
+#endif /* __TGMATH_H */
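
A minimal usage sketch of the type-generic dispatch defined above (hypothetical
caller code, assuming a C99 translation unit and that <tgmath.h> pulls in
<complex.h> as C99 requires):

    #include <tgmath.h>

    int main(void) {
      float  f = scalbn(1.0f, 3);         /* float argument   -> scalbnf */
      double d = scalbn(1.0, 3);          /* double argument  -> scalbn  */
      double _Complex z = 3.0 + 4.0 * I;
      double m = carg(z);                 /* complex argument -> carg    */
      return (int)(f + d + m);
    }

Each macro passes its argument through __tg_promote1, so overload resolution on
the __tg_* helpers selects the matching libm entry point at compile time.
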
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
new file mode 100644
index 0000000..e9715f1
--- /dev/null
+++ b/lib/Headers/tmmintrin.h
@@ -0,0 +1,218 @@
+/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TMMINTRIN_H
+#define __TMMINTRIN_H
+
+#ifndef __SSSE3__
+#error "SSSE3 instruction set not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi8(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsb((__v8qi)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi8(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsb128((__v16qi)a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi16(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsw((__v4hi)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi16(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsw128((__v8hi)a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi32(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsd((__v2si)a);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi32(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
+}
+
+#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)a, (__v4si)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pi32(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddd((__v2si)a, (__v2si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadds_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadds_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddsw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_epi32(__m128i a, __m128i b)
+{
+  return (__m128i)__builtin_ia32_phsubd128((__v4si)a, (__v4si)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pi16(__m64 a, __m64 b)
+{
+  return (__m64)__builtin_ia32_phsubw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pi32(__m64 a, __m64 b)
+{
+  return (__m64)__builtin_ia32_phsubd((__v2si)a, (__v2si)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsubs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phsubsw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddubs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_maddubs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaddubsw((__v8qi)a, (__v8qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhrs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhrs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmulhrsw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shuffle_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pshufb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_shuffle_pi8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pshufb((__v8qi)a, (__v8qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)a, (__v16qi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)a, (__v8hi)b);
+}
+
+static inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignd128((__v4si)a, (__v4si)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignb((__v8qi)a, (__v8qi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi32(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignd((__v2si)a, (__v2si)b);
+}
+
+#endif /* __SSSE3__ */
+
+#endif /* __TMMINTRIN_H */
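
The palignr wrappers above are #defines rather than inline functions because
the byte count must be an integer constant expression. A small usage sketch
(hypothetical helper, not part of the patch):

    #include <tmmintrin.h>

    /* Extract bytes 4..19 of the 32-byte concatenation hi:lo. */
    static __m128i shift_window(__m128i hi, __m128i lo)
    {
        return _mm_alignr_epi8(hi, lo, 4);
    }
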
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
new file mode 100644
index 0000000..c104f63
--- /dev/null
+++ b/lib/Headers/xmmintrin.h
@@ -0,0 +1,888 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#ifndef __SSE__
+#error "SSE instruction set not enabled"
+#else
+
+#include <mmintrin.h>
+
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16)));
+
+#include <mm_malloc.h>
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_add_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_addss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_add_ps(__m128 a, __m128 b)
+{
+ return a + b;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_subss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_ps(__m128 a, __m128 b)
+{
+ return a - b;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_mulss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_ps(__m128 a, __m128 b)
+{
+ return a * b;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_div_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_divss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_div_ps(__m128 a, __m128 b)
+{
+ return a / b;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_ss(__m128 a)
+{
+ return __builtin_ia32_sqrtss(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_ps(__m128 a)
+{
+ return __builtin_ia32_sqrtps(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp_ss(__m128 a)
+{
+ return __builtin_ia32_rcpss(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp_ps(__m128 a)
+{
+ return __builtin_ia32_rcpps(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt_ss(__m128 a)
+{
+ return __builtin_ia32_rsqrtss(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt_ps(__m128 a)
+{
+ return __builtin_ia32_rsqrtps(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_min_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_minss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_min_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_minps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_max_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_maxss(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_max_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_maxps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_and_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_andps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_andnps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_or_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_orps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_xor_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_xorps(a, b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 0);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 0);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 1);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 1);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 2);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 2);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 1);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 1);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 2);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 2);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 4);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 4);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 6);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 6);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 6);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 6);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 7);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 7);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 3);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 3);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comieq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comieq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comilt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comilt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comile_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comile(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comigt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comigt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comige_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comige(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_comineq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comineq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomieq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomieq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomilt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomilt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomile_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomile(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomigt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomigt(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomige_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomige(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomineq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomineq(a, b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_si32(__m128 a)
+{
+ return __builtin_ia32_cvtss2si(a);
+}
+
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_si64(__m128 a)
+{
+ return __builtin_ia32_cvtss2si64(a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi32(__m128 a)
+{
+ return (__m64)__builtin_ia32_cvtps2pi(a);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvttss_si32(__m128 a)
+{
+ return __builtin_ia32_cvttss2si(a);
+}
+
+static inline long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvttss_si64(__m128 a)
+{
+ return __builtin_ia32_cvttss2si64(a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvttps_pi32(__m128 a)
+{
+ return (__m64)__builtin_ia32_cvttps2pi(a);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_ss(__m128 a, int b)
+{
+ return __builtin_ia32_cvtsi2ss(a, b);
+}
+
+#ifdef __x86_64__
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_ss(__m128 a, long long b)
+{
+ return __builtin_ia32_cvtsi642ss(a, b);
+}
+
+#endif
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32_ps(__m128 a, __m64 b)
+{
+ return __builtin_ia32_cvtpi2ps(a, (__v2si)b);
+}
+
+static inline float __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_f32(__m128 a)
+{
+ return a[0];
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadh_pi(__m128 a, __m64 const *p)
+{
+ return __builtin_ia32_loadhps(a, (__v2si *)p);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_pi(__m128 a, __m64 const *p)
+{
+ return __builtin_ia32_loadlps(a, (__v2si *)p);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load_ss(float *p)
+{
+ return (__m128){ *p, 0, 0, 0 };
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load1_ps(float *p)
+{
+ return (__m128){ *p, *p, *p, *p };
+}
+
+#define _mm_load_ps1(p) _mm_load1_ps(p)
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load_ps(float *p)
+{
+ return *(__m128*)p;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_ps(float *p)
+{
+ return __builtin_ia32_loadups(p);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadr_ps(float *p)
+{
+ __m128 a = _mm_load_ps(p);
+ return __builtin_shufflevector(a, a, 3, 2, 1, 0);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ss(float w)
+{
+ return (__m128){ w, 0, 0, 0 };
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_ps(float w)
+{
+ return (__m128){ w, w, w, w };
+}
+
+// Microsoft specific.
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ps1(float w)
+{
+ return _mm_set1_ps(w);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ps(float z, float y, float x, float w)
+{
+ return (__m128){ w, x, y, z };
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_ps(float z, float y, float x, float w)
+{
+ return (__m128){ z, y, x, w };
+}
+
+static inline __m128 __attribute__((__always_inline__))
+_mm_setzero_ps(void)
+{
+ return (__m128){ 0, 0, 0, 0 };
+}
+
+static inline void __attribute__((__always_inline__))
+_mm_storeh_pi(__m64 *p, __m128 a)
+{
+ __builtin_ia32_storehps((__v2si *)p, a);
+}
+
+static inline void __attribute__((__always_inline__))
+_mm_storel_pi(__m64 *p, __m128 a)
+{
+ __builtin_ia32_storelps((__v2si *)p, a);
+}
+
+static inline void __attribute__((__always_inline__))
+_mm_store_ss(float *p, __m128 a)
+{
+ *p = a[0];
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_ps(float *p, __m128 a)
+{
+ __builtin_ia32_storeups(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store1_ps(float *p, __m128 a)
+{
+ a = __builtin_shufflevector(a, a, 0, 0, 0, 0);
+ _mm_storeu_ps(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_store_ps(float *p, __m128 a)
+{
+ *(__m128 *)p = a;
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_storer_ps(float *p, __m128 a)
+{
+ a = __builtin_shufflevector(a, a, 3, 2, 1, 0);
+ _mm_store_ps(p, a);
+}
+
+#define _MM_HINT_T0 1
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 3
+#define _MM_HINT_NTA 0
+
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)a, 0, sel))
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_pi(__m64 *p, __m64 a)
+{
+ __builtin_ia32_movntq(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_ps(float *p, __m128 a)
+{
+ __builtin_ia32_movntps(p, a);
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_sfence(void)
+{
+ __builtin_ia32_sfence();
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_extract_pi16(__m64 a, int n)
+{
+ /* FIXME:
+ * This should force n to be an immediate.
+ * This does not use the PEXTRW instruction. From looking at the LLVM source, the
+ instruction doesn't seem to be hooked up.
+ * The code could probably be made better :)
+ */
+ __v4hi b = (__v4hi)a;
+ return b[(n == 0) ? 0 : (n == 1 ? 1 : (n == 2 ? 2 : 3))];
+}
+
+/* FIXME: Implement this. We could add a __builtin_insertelement function that's similar to
+ the already existing __builtin_shufflevector.
+*/
+/*
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_insert_pi16(__m64 a, int d, int n)
+{
+ return (__m64){ 0LL };
+}
+*/
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_max_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaxsw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_max_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaxub((__v8qi)a, (__v8qi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_min_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pminsw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_min_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pminub((__v8qi)a, (__v8qi)b);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_pi8(__m64 a)
+{
+ return __builtin_ia32_pmovmskb((__v8qi)a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_pu16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
+}
+
+#define _mm_shuffle_pi16(a, n) ((__m64)__builtin_ia32_pshufw((__v4hi)a, n))
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskmove_si64(__m64 d, __m64 n, char *p)
+{
+ __builtin_ia32_maskmovq((__v8qi)d, (__v8qi)n, p);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_avg_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pavgb((__v8qi)a, (__v8qi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_avg_pu16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pavgw((__v4hi)a, (__v4hi)b);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sad_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psadbw((__v8qi)a, (__v8qi)b);
+}
+
+static inline unsigned int __attribute__((__always_inline__, __nodebug__))
+_mm_getcsr(void)
+{
+ return __builtin_ia32_stmxcsr();
+}
+
+static inline void __attribute__((__always_inline__, __nodebug__))
+_mm_setcsr(unsigned int i)
+{
+ __builtin_ia32_ldmxcsr(i);
+}
+
+#define _mm_shuffle_ps(a, b, mask) (__builtin_ia32_shufps(a, b, mask))
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 2, 6, 3, 7);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 0, 4, 1, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_move_ss(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 4, 1, 2, 3);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movehl_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 6, 7, 2, 3);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movelh_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 0, 1, 4, 5);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi16_ps(__m64 a)
+{
+ __m64 b, c;
+ __m128 r;
+
+ b = _mm_setzero_si64();
+ b = _mm_cmpgt_pi16(b, a);
+ c = _mm_unpackhi_pi16(a, b);
+ r = _mm_setzero_ps();
+ r = _mm_cvtpi32_ps(r, c);
+ r = _mm_movelh_ps(r, r);
+ c = _mm_unpacklo_pi16(a, b);
+ r = _mm_cvtpi32_ps(r, c);
+
+ return r;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpu16_ps(__m64 a)
+{
+ __m64 b, c;
+ __m128 r;
+
+ b = _mm_setzero_si64();
+ c = _mm_unpackhi_pi16(a, b);
+ r = _mm_setzero_ps();
+ r = _mm_cvtpi32_ps(r, c);
+ r = _mm_movelh_ps(r, r);
+ c = _mm_unpacklo_pi16(a, b);
+ r = _mm_cvtpi32_ps(r, c);
+
+ return r;
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi8_ps(__m64 a)
+{
+ __m64 b;
+
+ b = _mm_setzero_si64();
+ b = _mm_cmpgt_pi8(b, a);
+ b = _mm_unpacklo_pi8(a, b);
+
+ return _mm_cvtpi16_ps(b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpu8_ps(__m64 a)
+{
+ __m64 b;
+
+ b = _mm_setzero_si64();
+ b = _mm_unpacklo_pi8(a, b);
+
+ return _mm_cvtpi16_ps(b);
+}
+
+static inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32x2_ps(__m64 a, __m64 b)
+{
+ __m128 c;
+
+ c = _mm_setzero_ps();
+ c = _mm_cvtpi32_ps(c, b);
+ c = _mm_movelh_ps(c, c);
+
+ return _mm_cvtpi32_ps(c, a);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi16(__m128 a)
+{
+ __m64 b, c;
+
+ b = _mm_cvtps_pi32(a);
+ a = _mm_movehl_ps(a, a);
+ c = _mm_cvtps_pi32(a);
+
+ return _mm_packs_pi16(b, c);
+}
+
+static inline __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi8(__m128 a)
+{
+ __m64 b, c;
+
+ b = _mm_cvtps_pi16(a);
+ c = _mm_setzero_si64();
+
+ return _mm_packs_pi16(b, c);
+}
+
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_ps(__m128 a)
+{
+ return __builtin_ia32_movmskps(a);
+}
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
+
+#define _MM_EXCEPT_INVALID (0x0001)
+#define _MM_EXCEPT_DENORM (0x0002)
+#define _MM_EXCEPT_DIV_ZERO (0x0004)
+#define _MM_EXCEPT_OVERFLOW (0x0008)
+#define _MM_EXCEPT_UNDERFLOW (0x0010)
+#define _MM_EXCEPT_INEXACT (0x0020)
+#define _MM_EXCEPT_MASK (0x003f)
+
+#define _MM_MASK_INVALID (0x0080)
+#define _MM_MASK_DENORM (0x0100)
+#define _MM_MASK_DIV_ZERO (0x0200)
+#define _MM_MASK_OVERFLOW (0x0400)
+#define _MM_MASK_UNDERFLOW (0x0800)
+#define _MM_MASK_INEXACT (0x1000)
+#define _MM_MASK_MASK (0x1f80)
+
+#define _MM_ROUND_NEAREST (0x0000)
+#define _MM_ROUND_DOWN (0x2000)
+#define _MM_ROUND_UP (0x4000)
+#define _MM_ROUND_TOWARD_ZERO (0x6000)
+#define _MM_ROUND_MASK (0x6000)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000)
+#define _MM_FLUSH_ZERO_ON (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __m128 tmp3, tmp2, tmp1, tmp0; \
+ tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+ tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+ tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+ tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+ (row0) = _mm_movelh_ps(tmp0, tmp2); \
+ (row1) = _mm_movehl_ps(tmp2, tmp0); \
+ (row2) = _mm_movelh_ps(tmp1, tmp3); \
+  (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
+
+#include <emmintrin.h>
+
+#endif /* __SSE__ */
+
+#endif /* __XMMINTRIN_H */
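
The _MM_SET_* macros above rewrite individual MXCSR fields with a
read-modify-write of the control register. A short sketch of typical use
(hypothetical function name):

    #include <xmmintrin.h>

    static void configure_fp_env(void)
    {
        _MM_SET_ROUNDING_MODE(_MM_ROUND_TOWARD_ZERO);
        _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
    }
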
diff --git a/lib/Lex/CMakeLists.txt b/lib/Lex/CMakeLists.txt
new file mode 100644
index 0000000..a7237a7
--- /dev/null
+++ b/lib/Lex/CMakeLists.txt
@@ -0,0 +1,26 @@
+set(LLVM_NO_RTTI 1)
+
+# TODO: Add -maltivec when ARCH is PowerPC.
+
+add_clang_library(clangLex
+ HeaderMap.cpp
+ HeaderSearch.cpp
+ Lexer.cpp
+ LiteralSupport.cpp
+ MacroArgs.cpp
+ MacroInfo.cpp
+ PPCaching.cpp
+ PPDirectives.cpp
+ PPExpressions.cpp
+ PPLexerChange.cpp
+ PPMacroExpansion.cpp
+ Pragma.cpp
+ Preprocessor.cpp
+ PreprocessorLexer.cpp
+ PTHLexer.cpp
+ ScratchBuffer.cpp
+ TokenLexer.cpp
+ TokenConcatenation.cpp
+ )
+
+add_dependencies(clangLex ClangDiagnosticLex)
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
new file mode 100644
index 0000000..4c8b70e
--- /dev/null
+++ b/lib/Lex/HeaderMap.cpp
@@ -0,0 +1,245 @@
+//===--- HeaderMap.cpp - A file that acts like dir of symlinks ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HeaderMap interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Data Structures and Manifest Constants
+//===----------------------------------------------------------------------===//
+
+enum {
+ HMAP_HeaderMagicNumber = ('h' << 24) | ('m' << 16) | ('a' << 8) | 'p',
+ HMAP_HeaderVersion = 1,
+
+ HMAP_EmptyBucketKey = 0
+};
+
+namespace clang {
+struct HMapBucket {
+ uint32_t Key; // Offset (into strings) of key.
+
+ uint32_t Prefix; // Offset (into strings) of value prefix.
+ uint32_t Suffix; // Offset (into strings) of value suffix.
+};
+
+struct HMapHeader {
+ uint32_t Magic; // Magic word, also indicates byte order.
+ uint16_t Version; // Version number -- currently 1.
+ uint16_t Reserved; // Reserved for future use - zero for now.
+ uint32_t StringsOffset; // Offset to start of string pool.
+ uint32_t NumEntries; // Number of entries in the string table.
+ uint32_t NumBuckets; // Number of buckets (always a power of 2).
+ uint32_t MaxValueLength; // Length of longest result path (excluding nul).
+ // An array of 'NumBuckets' HMapBucket objects follows this header.
+ // Strings follow the buckets, at StringsOffset.
+};
+} // end namespace clang.
+
+/// HashHMapKey - This is the 'well known' hash function required by the file
+/// format, used to look up keys in the hash table. The hash table uses simple
+/// linear probing based on this function.
+static inline unsigned HashHMapKey(const char *S, const char *End) {
+ unsigned Result = 0;
+
+ for (; S != End; S++)
+ Result += tolower(*S) * 13;
+ return Result;
+}
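
Because every byte is lowercased before being folded in, keys that differ only
in case hash identically, which matches the case-insensitive key comparison
used during lookup below. Illustrative check (only meaningful inside this
translation unit, since HashHMapKey is static):

    const char *A = "Foo.h", *B = "foo.h";
    assert(HashHMapKey(A, A + 5) == HashHMapKey(B, B + 5));
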
+
+
+
+//===----------------------------------------------------------------------===//
+// Verification and Construction
+//===----------------------------------------------------------------------===//
+
+/// HeaderMap::Create - This attempts to load the specified file as a header
+/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
+/// If it looks like a HeaderMap but is obviously corrupted, it also gives up
+/// and returns null.
+const HeaderMap *HeaderMap::Create(const FileEntry *FE) {
+ // If the file is too small to be a header map, ignore it.
+ unsigned FileSize = FE->getSize();
+ if (FileSize <= sizeof(HMapHeader)) return 0;
+
+ llvm::OwningPtr<const llvm::MemoryBuffer> FileBuffer(
+ llvm::MemoryBuffer::getFile(FE->getName(), 0, FE->getSize()));
+ if (FileBuffer == 0) return 0; // Unreadable file?
+ const char *FileStart = FileBuffer->getBufferStart();
+
+ // We know the file is at least as big as the header, check it now.
+ const HMapHeader *Header = reinterpret_cast<const HMapHeader*>(FileStart);
+
+ // Sniff it to see if it's a headermap by checking the magic number and
+ // version.
+ bool NeedsByteSwap;
+ if (Header->Magic == HMAP_HeaderMagicNumber &&
+ Header->Version == HMAP_HeaderVersion)
+ NeedsByteSwap = false;
+ else if (Header->Magic == llvm::ByteSwap_32(HMAP_HeaderMagicNumber) &&
+ Header->Version == llvm::ByteSwap_16(HMAP_HeaderVersion))
+ NeedsByteSwap = true; // Mixed endianness headermap.
+ else
+ return 0; // Not a header map.
+
+ if (Header->Reserved != 0) return 0;
+
+ // Okay, everything looks good, create the header map.
+ return new HeaderMap(FileBuffer.take(), NeedsByteSwap);
+}
+
+HeaderMap::~HeaderMap() {
+ delete FileBuffer;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility Methods
+//===----------------------------------------------------------------------===//
+
+
+/// getFileName - Return the filename of the headermap.
+const char *HeaderMap::getFileName() const {
+ return FileBuffer->getBufferIdentifier();
+}
+
+unsigned HeaderMap::getEndianAdjustedWord(unsigned X) const {
+ if (!NeedsBSwap) return X;
+ return llvm::ByteSwap_32(X);
+}
+
+/// getHeader - Return a reference to the raw, un-byte-swapped file header.
+/// This method cannot fail.
+const HMapHeader &HeaderMap::getHeader() const {
+ // We know the file is at least as big as the header. Return it.
+ return *reinterpret_cast<const HMapHeader*>(FileBuffer->getBufferStart());
+}
+
+/// getBucket - Return the specified hash table bucket from the header map,
+/// bswap'ing its fields as appropriate. If the bucket number is not valid,
+/// this returns a bucket with an empty key (0).
+HMapBucket HeaderMap::getBucket(unsigned BucketNo) const {
+ HMapBucket Result;
+ Result.Key = HMAP_EmptyBucketKey;
+
+ const HMapBucket *BucketArray =
+ reinterpret_cast<const HMapBucket*>(FileBuffer->getBufferStart() +
+ sizeof(HMapHeader));
+
+ const HMapBucket *BucketPtr = BucketArray+BucketNo;
+ if ((char*)(BucketPtr+1) > FileBuffer->getBufferEnd()) {
+ Result.Prefix = 0;
+ Result.Suffix = 0;
+ return Result; // Invalid buffer, corrupt hmap.
+ }
+
+ // Otherwise, the bucket is valid. Load the values, bswapping as needed.
+ Result.Key = getEndianAdjustedWord(BucketPtr->Key);
+ Result.Prefix = getEndianAdjustedWord(BucketPtr->Prefix);
+ Result.Suffix = getEndianAdjustedWord(BucketPtr->Suffix);
+ return Result;
+}
+
+/// getString - Look up the specified string in the string table. If the string
+/// index is not valid, it returns a null pointer.
+const char *HeaderMap::getString(unsigned StrTabIdx) const {
+ // Add the start of the string table to the idx.
+ StrTabIdx += getEndianAdjustedWord(getHeader().StringsOffset);
+
+ // Check for invalid index.
+ if (StrTabIdx >= FileBuffer->getBufferSize())
+ return 0;
+
+ // Otherwise, we have a valid pointer into the file. Just return it. We know
+ // that the "string" can not overrun the end of the file, because the buffer
+ // is nul terminated by virtue of being a MemoryBuffer.
+ return FileBuffer->getBufferStart()+StrTabIdx;
+}
+
+/// StringsEqualWithoutCase - Compare the specified two strings for case-
+/// insensitive equality, returning true if they are equal. Both strings are
+/// known to have the same length.
+static bool StringsEqualWithoutCase(const char *S1, const char *S2,
+ unsigned Len) {
+ for (; Len; ++S1, ++S2, --Len)
+ if (tolower(*S1) != tolower(*S2))
+ return false;
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// The Main Drivers
+//===----------------------------------------------------------------------===//
+
+/// dump - Print the contents of this headermap to stderr.
+void HeaderMap::dump() const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ fprintf(stderr, "Header Map %s:\n %d buckets, %d entries\n",
+ getFileName(), NumBuckets,
+ getEndianAdjustedWord(Hdr.NumEntries));
+
+ for (unsigned i = 0; i != NumBuckets; ++i) {
+ HMapBucket B = getBucket(i);
+ if (B.Key == HMAP_EmptyBucketKey) continue;
+
+ const char *Key = getString(B.Key);
+ const char *Prefix = getString(B.Prefix);
+ const char *Suffix = getString(B.Suffix);
+ fprintf(stderr, " %d. %s -> '%s' '%s'\n", i, Key, Prefix, Suffix);
+ }
+}
+
+/// LookupFile - Check to see if the specified relative filename is located in
+/// this HeaderMap. If so, open it and return its FileEntry.
+const FileEntry *HeaderMap::LookupFile(const char *FilenameStart,
+ const char *FilenameEnd,
+ FileManager &FM) const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ // If the number of buckets is not a power of two, the headermap is corrupt.
+ // Don't probe infinitely.
+ if (NumBuckets & (NumBuckets-1))
+ return 0;
+
+ // Linearly probe the hash table.
+ for (unsigned Bucket = HashHMapKey(FilenameStart, FilenameEnd);; ++Bucket) {
+ HMapBucket B = getBucket(Bucket & (NumBuckets-1));
+ if (B.Key == HMAP_EmptyBucketKey) return 0; // Hash miss.
+
+ // See if the key matches. If not, probe on.
+ const char *Key = getString(B.Key);
+ unsigned BucketKeyLen = strlen(Key);
+ if (BucketKeyLen != unsigned(FilenameEnd-FilenameStart))
+ continue;
+
+ // See if the actual strings equal.
+ if (!StringsEqualWithoutCase(FilenameStart, Key, BucketKeyLen))
+ continue;
+
+ // If so, we have a match in the hash table. Construct the destination
+ // path.
+ llvm::SmallString<1024> DestPath;
+ DestPath += getString(B.Prefix);
+ DestPath += getString(B.Suffix);
+ return FM.getFile(DestPath.begin(), DestPath.end());
+ }
+}
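
A hypothetical caller, to show how a header-map hit turns into a FileEntry
(the key, prefix, and suffix strings mentioned below are invented for
illustration):

    // e.g. a key "Carbon/Carbon.h" whose bucket stores the prefix
    // ".../Carbon.framework/Headers/" and the suffix "Carbon.h" resolves to
    // the concatenated path.
    void resolveViaHeaderMap(const HeaderMap *HM, FileManager &FM) {
      const char *Name = "Carbon/Carbon.h";
      if (const FileEntry *FE = HM->LookupFile(Name, Name + strlen(Name), FM))
        fprintf(stderr, "%s -> %s\n", Name, FE->getName());
    }
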
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
new file mode 100644
index 0000000..129fa1a
--- /dev/null
+++ b/lib/Lex/HeaderSearch.cpp
@@ -0,0 +1,446 @@
+//===--- HeaderSearch.cpp - Resolve Header File Locations ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DirectoryLookup and HeaderSearch interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/System/Path.h"
+#include "llvm/ADT/SmallString.h"
+#include <cstdio>
+using namespace clang;
+
+const IdentifierInfo *
+HeaderFileInfo::getControllingMacro(ExternalIdentifierLookup *External) {
+ if (ControllingMacro)
+ return ControllingMacro;
+
+ if (!ControllingMacroID || !External)
+ return 0;
+
+ ControllingMacro = External->GetIdentifier(ControllingMacroID);
+ return ControllingMacro;
+}
+
+HeaderSearch::HeaderSearch(FileManager &FM) : FileMgr(FM), FrameworkMap(64) {
+ SystemDirIdx = 0;
+ NoCurDirSearch = false;
+
+ ExternalLookup = 0;
+ NumIncluded = 0;
+ NumMultiIncludeFileOptzn = 0;
+ NumFrameworkLookups = NumSubFrameworkLookups = 0;
+}
+
+HeaderSearch::~HeaderSearch() {
+ // Delete headermaps.
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ delete HeaderMaps[i].second;
+}
+
+void HeaderSearch::PrintStats() {
+ fprintf(stderr, "\n*** HeaderSearch Stats:\n");
+ fprintf(stderr, "%d files tracked.\n", (int)FileInfo.size());
+ unsigned NumOnceOnlyFiles = 0, MaxNumIncludes = 0, NumSingleIncludedFiles = 0;
+ for (unsigned i = 0, e = FileInfo.size(); i != e; ++i) {
+ NumOnceOnlyFiles += FileInfo[i].isImport;
+ if (MaxNumIncludes < FileInfo[i].NumIncludes)
+ MaxNumIncludes = FileInfo[i].NumIncludes;
+ NumSingleIncludedFiles += FileInfo[i].NumIncludes == 1;
+ }
+ fprintf(stderr, " %d #import/#pragma once files.\n", NumOnceOnlyFiles);
+ fprintf(stderr, " %d included exactly once.\n", NumSingleIncludedFiles);
+ fprintf(stderr, " %d max times a file is included.\n", MaxNumIncludes);
+
+ fprintf(stderr, " %d #include/#include_next/#import.\n", NumIncluded);
+ fprintf(stderr, " %d #includes skipped due to"
+ " the multi-include optimization.\n", NumMultiIncludeFileOptzn);
+
+ fprintf(stderr, "%d framework lookups.\n", NumFrameworkLookups);
+ fprintf(stderr, "%d subframework lookups.\n", NumSubFrameworkLookups);
+}
+
+/// CreateHeaderMap - This method returns a HeaderMap for the specified
+/// FileEntry, uniquing them through the 'HeaderMaps' data structure.
+const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
+ // We expect the number of headermaps to be small, and almost always empty.
+ // If it ever grows, use of a linear search should be re-evaluated.
+ if (!HeaderMaps.empty()) {
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ // Pointer equality comparison of FileEntries works because they are
+ // already uniqued by inode.
+ if (HeaderMaps[i].first == FE)
+ return HeaderMaps[i].second;
+ }
+
+ if (const HeaderMap *HM = HeaderMap::Create(FE)) {
+ HeaderMaps.push_back(std::make_pair(FE, HM));
+ return HM;
+ }
+
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// File lookup within a DirectoryLookup scope
+//===----------------------------------------------------------------------===//
+
+/// getName - Return the directory or filename corresponding to this lookup
+/// object.
+const char *DirectoryLookup::getName() const {
+ if (isNormalDir())
+ return getDir()->getName();
+ if (isFramework())
+ return getFrameworkDir()->getName();
+ assert(isHeaderMap() && "Unknown DirectoryLookup");
+ return getHeaderMap()->getFileName();
+}
+
+
+/// LookupFile - Lookup the specified file in this search path, returning it
+/// if it exists or returning null if not.
+const FileEntry *DirectoryLookup::LookupFile(const char *FilenameStart,
+ const char *FilenameEnd,
+ HeaderSearch &HS) const {
+ llvm::SmallString<1024> TmpDir;
+ if (isNormalDir()) {
+ // Concatenate the requested file onto the directory.
+ // FIXME: Portability. Filename concatenation should be in sys::Path.
+ TmpDir += getDir()->getName();
+ TmpDir.push_back('/');
+ TmpDir.append(FilenameStart, FilenameEnd);
+ return HS.getFileMgr().getFile(TmpDir.begin(), TmpDir.end());
+ }
+
+ if (isFramework())
+ return DoFrameworkLookup(FilenameStart, FilenameEnd, HS);
+
+ assert(isHeaderMap() && "Unknown directory lookup");
+ return getHeaderMap()->LookupFile(FilenameStart, FilenameEnd,HS.getFileMgr());
+}
+
+
+/// DoFrameworkLookup - Do a lookup of the specified file in the current
+/// DirectoryLookup, which is a framework directory.
+const FileEntry *DirectoryLookup::DoFrameworkLookup(const char *FilenameStart,
+ const char *FilenameEnd,
+ HeaderSearch &HS) const {
+ FileManager &FileMgr = HS.getFileMgr();
+
+ // Framework names must have a '/' in the filename.
+ const char *SlashPos = std::find(FilenameStart, FilenameEnd, '/');
+ if (SlashPos == FilenameEnd) return 0;
+
+ // Find out if this is the home for the specified framework, by checking
+  // HeaderSearch. Possible answers are yes, no, and unknown.
+ const DirectoryEntry *&FrameworkDirCache =
+ HS.LookupFrameworkCache(FilenameStart, SlashPos);
+
+ // If it is known and in some other directory, fail.
+ if (FrameworkDirCache && FrameworkDirCache != getFrameworkDir())
+ return 0;
+
+ // Otherwise, construct the path to this framework dir.
+
+ // FrameworkName = "/System/Library/Frameworks/"
+ llvm::SmallString<1024> FrameworkName;
+ FrameworkName += getFrameworkDir()->getName();
+ if (FrameworkName.empty() || FrameworkName.back() != '/')
+ FrameworkName.push_back('/');
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa"
+ FrameworkName.append(FilenameStart, SlashPos);
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa.framework/"
+ FrameworkName += ".framework/";
+
+  // If the cache entry is still unresolved, check whether the framework
+  // directory actually exists on disk now.
+ if (FrameworkDirCache == 0) {
+ HS.IncrementFrameworkLookupCount();
+
+ // If the framework dir doesn't exist, we fail.
+ // FIXME: It's probably more efficient to query this with FileMgr.getDir.
+ if (!llvm::sys::Path(std::string(FrameworkName.begin(),
+ FrameworkName.end())).exists())
+ return 0;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ FrameworkDirCache = getFrameworkDir();
+ }
+
+ // Check "/System/Library/Frameworks/Cocoa.framework/Headers/file.h"
+ unsigned OrigSize = FrameworkName.size();
+
+ FrameworkName += "Headers/";
+ FrameworkName.append(SlashPos+1, FilenameEnd);
+ if (const FileEntry *FE = FileMgr.getFile(FrameworkName.begin(),
+ FrameworkName.end())) {
+ return FE;
+ }
+
+ // Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h"
+ const char *Private = "Private";
+ FrameworkName.insert(FrameworkName.begin()+OrigSize, Private,
+ Private+strlen(Private));
+ return FileMgr.getFile(FrameworkName.begin(), FrameworkName.end());
+}
+
+
+//===----------------------------------------------------------------------===//
+// Header File Location.
+//===----------------------------------------------------------------------===//
+
+
+/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+/// return null on failure. isAngled indicates whether the file reference is
+/// for system #include's or not (i.e. using <> instead of ""). CurFileEnt, if
+/// non-null, indicates where the #including file is, in case a relative search
+/// is needed.
+const FileEntry *HeaderSearch::LookupFile(const char *FilenameStart,
+ const char *FilenameEnd,
+ bool isAngled,
+ const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir,
+ const FileEntry *CurFileEnt) {
+  // If 'Filename' is absolute, check to see if it exists; no searching is done.
+ // FIXME: Portability. This should be a sys::Path interface, this doesn't
+ // handle things like C:\foo.txt right, nor win32 \\network\device\blah.
+ if (FilenameStart[0] == '/') {
+ CurDir = 0;
+
+ // If this was an #include_next "/absolute/file", fail.
+ if (FromDir) return 0;
+
+ // Otherwise, just return the file.
+ return FileMgr.getFile(FilenameStart, FilenameEnd);
+ }
+
+ // Step #0, unless disabled, check to see if the file is in the #includer's
+ // directory. This has to be based on CurFileEnt, not CurDir, because
+ // CurFileEnt could be a #include of a subdirectory (#include "foo/bar.h") and
+ // a subsequent include of "baz.h" should resolve to "whatever/foo/baz.h".
+ // This search is not done for <> headers.
+ if (CurFileEnt && !isAngled && !NoCurDirSearch) {
+ llvm::SmallString<1024> TmpDir;
+ // Concatenate the requested file onto the directory.
+ // FIXME: Portability. Filename concatenation should be in sys::Path.
+ TmpDir += CurFileEnt->getDir()->getName();
+ TmpDir.push_back('/');
+ TmpDir.append(FilenameStart, FilenameEnd);
+ if (const FileEntry *FE = FileMgr.getFile(TmpDir.begin(), TmpDir.end())) {
+ // Leave CurDir unset.
+ // This file is a system header or C++ unfriendly if the old file is.
+ //
+ // Note that the temporary 'DirInfo' is required here, as either call to
+ // getFileInfo could resize the vector and we don't want to rely on order
+ // of evaluation.
+ unsigned DirInfo = getFileInfo(CurFileEnt).DirInfo;
+ getFileInfo(FE).DirInfo = DirInfo;
+ return FE;
+ }
+ }
+
+ CurDir = 0;
+
+ // If this is a system #include, ignore the user #include locs.
+ unsigned i = isAngled ? SystemDirIdx : 0;
+
+ // If this is a #include_next request, start searching after the directory the
+ // file was found in.
+ if (FromDir)
+ i = FromDir-&SearchDirs[0];
+
+  // Cache all of the lookups performed by this method. Many headers are
+  // multiply included, and the "pragma once" optimization prevents them from
+  // being re-lexed/preprocessed, but each #include would still have to search
+  // through a (potentially huge) series of SearchDirs to find the file.
+ std::pair<unsigned, unsigned> &CacheLookup =
+ LookupFileCache.GetOrCreateValue(FilenameStart, FilenameEnd).getValue();
+
+ // If the entry has been previously looked up, the first value will be
+ // non-zero. If the value is equal to i (the start point of our search), then
+ // this is a matching hit.
+ if (CacheLookup.first == i+1) {
+ // Skip querying potentially lots of directories for this lookup.
+ i = CacheLookup.second;
+ } else {
+ // Otherwise, this is the first query, or the previous query didn't match
+ // our search start. We will fill in our found location below, so prime the
+ // start point value.
+ CacheLookup.first = i+1;
+ }
+
+ // Check each directory in sequence to see if it contains this file.
+ for (; i != SearchDirs.size(); ++i) {
+ const FileEntry *FE =
+ SearchDirs[i].LookupFile(FilenameStart, FilenameEnd, *this);
+ if (!FE) continue;
+
+ CurDir = &SearchDirs[i];
+
+ // This file is a system header or C++ unfriendly if the dir is.
+ getFileInfo(FE).DirInfo = CurDir->getDirCharacteristic();
+
+ // Remember this location for the next lookup we do.
+ CacheLookup.second = i;
+ return FE;
+ }
+
+ // Otherwise, didn't find it. Remember we didn't find this.
+ CacheLookup.second = SearchDirs.size();
+ return 0;
+}
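
A standalone restatement of the per-filename caching scheme used above (this is
illustrative code, not part of the patch): the cached pair stores 1 + the start
index of the previous search and the index where the file was found.

    struct LookupCacheEntry {
      unsigned StartPlusOne; // 1 + start index of the last search; 0 = unused
      unsigned HitIdx;       // index of the hit, or SearchDirs.size() on a miss
    };

    static unsigned resumePoint(LookupCacheEntry &E, unsigned Start) {
      if (E.StartPlusOne == Start + 1)
        return E.HitIdx;          // same start point as last time: reuse result
      E.StartPlusOne = Start + 1; // new start point: prime the entry
      return Start;
    }
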
+
+/// LookupSubframeworkHeader - Look up a subframework for the specified
+/// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
+/// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
+/// is a subframework within Carbon.framework. If so, return the FileEntry
+/// for the designated file, otherwise return null.
+const FileEntry *HeaderSearch::
+LookupSubframeworkHeader(const char *FilenameStart,
+ const char *FilenameEnd,
+ const FileEntry *ContextFileEnt) {
+ assert(ContextFileEnt && "No context file?");
+
+ // Framework names must have a '/' in the filename. Find it.
+ const char *SlashPos = std::find(FilenameStart, FilenameEnd, '/');
+ if (SlashPos == FilenameEnd) return 0;
+
+ // Look up the base framework name of the ContextFileEnt.
+ const char *ContextName = ContextFileEnt->getName();
+
+  // If the context file isn't within a framework, this can't be a subframework.
+ const char *FrameworkPos = strstr(ContextName, ".framework/");
+ if (FrameworkPos == 0)
+ return 0;
+
+ llvm::SmallString<1024> FrameworkName(ContextName,
+ FrameworkPos+strlen(".framework/"));
+
+ // Append Frameworks/HIToolbox.framework/
+ FrameworkName += "Frameworks/";
+ FrameworkName.append(FilenameStart, SlashPos);
+ FrameworkName += ".framework/";
+
+ llvm::StringMapEntry<const DirectoryEntry *> &CacheLookup =
+ FrameworkMap.GetOrCreateValue(FilenameStart, SlashPos);
+
+ // Some other location?
+ if (CacheLookup.getValue() &&
+ CacheLookup.getKeyLength() == FrameworkName.size() &&
+ memcmp(CacheLookup.getKeyData(), &FrameworkName[0],
+ CacheLookup.getKeyLength()) != 0)
+ return 0;
+
+ // Cache subframework.
+ if (CacheLookup.getValue() == 0) {
+ ++NumSubFrameworkLookups;
+
+ // If the framework dir doesn't exist, we fail.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.begin(),
+ FrameworkName.end());
+ if (Dir == 0) return 0;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ CacheLookup.setValue(Dir);
+ }
+
+ const FileEntry *FE = 0;
+
+ // Check ".../Frameworks/HIToolbox.framework/Headers/HIToolbox.h"
+ llvm::SmallString<1024> HeadersFilename(FrameworkName);
+ HeadersFilename += "Headers/";
+ HeadersFilename.append(SlashPos+1, FilenameEnd);
+ if (!(FE = FileMgr.getFile(HeadersFilename.begin(),
+ HeadersFilename.end()))) {
+
+ // Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h"
+ HeadersFilename = FrameworkName;
+ HeadersFilename += "PrivateHeaders/";
+ HeadersFilename.append(SlashPos+1, FilenameEnd);
+ if (!(FE = FileMgr.getFile(HeadersFilename.begin(), HeadersFilename.end())))
+ return 0;
+ }
+
+ // This file is a system header or C++ unfriendly if the old file is.
+ //
+ // Note that the temporary 'DirInfo' is required here, as either call to
+ // getFileInfo could resize the vector and we don't want to rely on order
+ // of evaluation.
+ unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
+ getFileInfo(FE).DirInfo = DirInfo;
+ return FE;
+}
+
+//===----------------------------------------------------------------------===//
+// File Info Management.
+//===----------------------------------------------------------------------===//
+
+
+/// getFileInfo - Return the HeaderFileInfo structure for the specified
+/// FileEntry.
+HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
+ if (FE->getUID() >= FileInfo.size())
+ FileInfo.resize(FE->getUID()+1);
+ return FileInfo[FE->getUID()];
+}
+
+void HeaderSearch::setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID) {
+ if (UID >= FileInfo.size())
+ FileInfo.resize(UID+1);
+ FileInfo[UID] = HFI;
+}
+
+/// ShouldEnterIncludeFile - Mark the specified file as a target of a
+/// #include, #include_next, or #import directive. Return false if #including
+/// the file will have no effect or true if we should include it.
+bool HeaderSearch::ShouldEnterIncludeFile(const FileEntry *File, bool isImport){
+ ++NumIncluded; // Count # of attempted #includes.
+
+ // Get information about this file.
+ HeaderFileInfo &FileInfo = getFileInfo(File);
+
+ // If this is a #import directive, check that we have not already imported
+ // this header.
+ if (isImport) {
+ // If this has already been imported, don't import it again.
+ FileInfo.isImport = true;
+
+ // Has this already been #import'ed or #include'd?
+ if (FileInfo.NumIncludes) return false;
+ } else {
+ // Otherwise, if this is a #include of a file that was previously #import'd
+ // or if this is the second #include of a #pragma once file, ignore it.
+ if (FileInfo.isImport)
+ return false;
+ }
+
+ // Next, check to see if the file is wrapped with #ifndef guards. If so, and
+ // if the macro that guards it is defined, we know the #include has no effect.
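+  // For example, a header wrapped in "#ifndef FOO_H / #define FOO_H ...
+  // #endif" records FOO_H as its controlling macro; once FOO_H is defined,
+  // later #includes of that header can be skipped without re-entering it.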
+ if (const IdentifierInfo *ControllingMacro
+ = FileInfo.getControllingMacro(ExternalLookup))
+ if (ControllingMacro->hasMacroDefinition()) {
+ ++NumMultiIncludeFileOptzn;
+ return false;
+ }
+
+ // Increment the number of times this file has been included.
+ ++FileInfo.NumIncludes;
+
+ return true;
+}
+
+
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
new file mode 100644
index 0000000..c2ffd6d
--- /dev/null
+++ b/lib/Lex/Lexer.cpp
@@ -0,0 +1,1809 @@
+//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Lexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: GCC Diagnostics emitted by the lexer:
+// PEDWARN: (form feed|vertical tab) in preprocessing directive
+//
+// Universal characters, unicode, char mapping:
+// WARNING: `%.*s' is not in NFKC
+// WARNING: `%.*s' is not in NFC
+//
+// Other:
+// TODO: Options to support:
+// -fexec-charset,-fwide-exec-charset
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cctype>
+using namespace clang;
+
+static void InitCharacterInfo();
+
+//===----------------------------------------------------------------------===//
+// Token Class Implementation
+//===----------------------------------------------------------------------===//
+
+/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
+bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
+ if (IdentifierInfo *II = getIdentifierInfo())
+ return II->getObjCKeywordID() == objcKey;
+ return false;
+}
+
+/// getObjCKeywordID - Return the ObjC keyword kind.
+tok::ObjCKeywordKind Token::getObjCKeywordID() const {
+ IdentifierInfo *specId = getIdentifierInfo();
+ return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Lexer Class Implementation
+//===----------------------------------------------------------------------===//
+
+void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
+ const char *BufEnd) {
+ InitCharacterInfo();
+
+ BufferStart = BufStart;
+ BufferPtr = BufPtr;
+ BufferEnd = BufEnd;
+
+ assert(BufEnd[0] == 0 &&
+ "We assume that the input buffer has a null character at the end"
+ " to simplify lexing!");
+
+ Is_PragmaLexer = false;
+
+ // Start of the file is a start of line.
+ IsAtStartOfLine = true;
+
+ // We are not after parsing a #.
+ ParsingPreprocessorDirective = false;
+
+ // We are not after parsing #include.
+ ParsingFilename = false;
+
+ // We are not in raw mode. Raw mode disables diagnostics and interpretation
+ // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
+ // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
+ // or otherwise skipping over tokens.
+ LexingRawMode = false;
+
+ // Default to not keeping comments.
+ ExtendedTokenMode = 0;
+}
+
+/// Lexer constructor - Create a new lexer object for the specified buffer
+/// with the specified preprocessor managing the lexing process. This lexer
+/// assumes that the associated file buffer and Preprocessor objects will
+/// outlive it, so it doesn't take ownership of either of them.
+Lexer::Lexer(FileID FID, Preprocessor &PP)
+ : PreprocessorLexer(&PP, FID),
+ FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
+ Features(PP.getLangOptions()) {
+
+ const llvm::MemoryBuffer *InputFile = PP.getSourceManager().getBuffer(FID);
+
+ InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
+ InputFile->getBufferEnd());
+
+ // Default to keeping comments if the preprocessor wants them.
+ SetCommentRetentionState(PP.getCommentRetentionState());
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(SourceLocation fileloc, const LangOptions &features,
+ const char *BufStart, const char *BufPtr, const char *BufEnd)
+ : FileLoc(fileloc), Features(features) {
+
+ InitLexer(BufStart, BufPtr, BufEnd);
+
+ // We *are* in raw mode.
+ LexingRawMode = true;
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(FileID FID, const SourceManager &SM, const LangOptions &features)
+ : FileLoc(SM.getLocForStartOfFile(FID)), Features(features) {
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+
+ InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
+ FromFile->getBufferEnd());
+
+ // We *are* in raw mode.
+ LexingRawMode = true;
+}
+
+/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
+/// _Pragma expansion. This has a variety of magic semantics that this method
+/// sets up. It returns a new'd Lexer that must be delete'd when done.
+///
+/// On entrance to this routine, TokStartLoc is a macro location which has a
+/// spelling loc that indicates the bytes to be lexed for the token and an
+/// instantiation location that indicates where all lexed tokens should be
+/// "expanded from".
+///
+/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
+/// normal lexer that remaps tokens as they fly by. This would require making
+/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
+/// interface that could handle this stuff. This would pull GetMappedTokenLoc
+/// out of the critical path of the lexer!
+///
+Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
+ SourceLocation InstantiationLocStart,
+ SourceLocation InstantiationLocEnd,
+ unsigned TokLen, Preprocessor &PP) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create the lexer as if we were going to lex the file normally.
+ FileID SpellingFID = SM.getFileID(SpellingLoc);
+ Lexer *L = new Lexer(SpellingFID, PP);
+
+ // Now that the lexer is created, change the start/end locations so that we
+ // just lex the subsection of the file that we want. This is lexing from a
+ // scratch buffer.
+ const char *StrData = SM.getCharacterData(SpellingLoc);
+
+ L->BufferPtr = StrData;
+ L->BufferEnd = StrData+TokLen;
+ assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");
+
+ // Set the SourceLocation with the remapping information. This ensures that
+ // GetMappedTokenLoc will remap the tokens as they are lexed.
+ L->FileLoc = SM.createInstantiationLoc(SM.getLocForStartOfFile(SpellingFID),
+ InstantiationLocStart,
+ InstantiationLocEnd, TokLen);
+
+ // Ensure that the lexer thinks it is inside a directive, so that end \n will
+ // return an EOM token.
+ L->ParsingPreprocessorDirective = true;
+
+ // This lexer really is for _Pragma.
+ L->Is_PragmaLexer = true;
+ return L;
+}
+
+
+/// Stringify - Convert the specified string into a C string by escaping '\'
+/// and '"' characters (or '\'' instead of '"' if Charify is set).  Note that
+/// this does not itself add the surrounding ""'s.
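+/// For example, with Charify false the text  a"b\c  becomes  a\"b\\c  (the
+/// caller adds the surrounding quotes).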
+std::string Lexer::Stringify(const std::string &Str, bool Charify) {
+ std::string Result = Str;
+ char Quote = Charify ? '\'' : '"';
+ for (unsigned i = 0, e = Result.size(); i != e; ++i) {
+ if (Result[i] == '\\' || Result[i] == Quote) {
+ Result.insert(Result.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+ return Result;
+}
+
+/// Stringify - Convert the specified string into a C string by escaping '\'
+/// and " characters. This does not add surrounding ""'s to the string.
+void Lexer::Stringify(llvm::SmallVectorImpl<char> &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ if (Str[i] == '\\' || Str[i] == '"') {
+ Str.insert(Str.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+}
+
+
+/// MeasureTokenLength - Relex the token at the specified location and return
+/// its length in bytes in the input file. If the token needs cleaning (e.g.
+/// includes a trigraph or an escaped newline) then this count includes bytes
+/// that are part of that.
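+/// For example, measuring the token at the start of "sizeof(x)" relexes the
+/// "sizeof" identifier and returns 6.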
+unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ // TODO: this could be special cased for common tokens like identifiers, ')',
+ // etc to make this faster, if it mattered. Just look at StrData[0] to handle
+ // all obviously single-char tokens. This could use
+ // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
+ // something.
+
+ // If this comes from a macro expansion, we really do want the macro name, not
+ // the token this macro expanded to.
+ Loc = SM.getInstantiationLoc(Loc);
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ std::pair<const char *,const char *> Buffer = SM.getBufferData(LocInfo.first);
+ const char *StrData = Buffer.first+LocInfo.second;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(Loc, LangOpts, Buffer.first, StrData, Buffer.second);
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+ return TheTok.getLength();
+}
+
+//===----------------------------------------------------------------------===//
+// Character information.
+//===----------------------------------------------------------------------===//
+
+static unsigned char CharInfo[256];
+
+enum {
+ CHAR_HORZ_WS = 0x01, // ' ', '\t', '\f', '\v'. Note, no '\0'
+ CHAR_VERT_WS = 0x02, // '\r', '\n'
+ CHAR_LETTER = 0x04, // a-z,A-Z
+ CHAR_NUMBER = 0x08, // 0-9
+ CHAR_UNDER = 0x10, // _
+ CHAR_PERIOD = 0x20 // .
+};
+
+static void InitCharacterInfo() {
+ static bool isInited = false;
+ if (isInited) return;
+ isInited = true;
+
+  // Initialize the CharInfo table.
+ // TODO: statically initialize this.
+ CharInfo[(int)' '] = CharInfo[(int)'\t'] =
+ CharInfo[(int)'\f'] = CharInfo[(int)'\v'] = CHAR_HORZ_WS;
+ CharInfo[(int)'\n'] = CharInfo[(int)'\r'] = CHAR_VERT_WS;
+
+ CharInfo[(int)'_'] = CHAR_UNDER;
+ CharInfo[(int)'.'] = CHAR_PERIOD;
+ for (unsigned i = 'a'; i <= 'z'; ++i)
+ CharInfo[i] = CharInfo[i+'A'-'a'] = CHAR_LETTER;
+ for (unsigned i = '0'; i <= '9'; ++i)
+ CharInfo[i] = CHAR_NUMBER;
+}
+
+/// isIdentifierBody - Return true if this is the body character of an
+/// identifier, which is [a-zA-Z0-9_].
+static inline bool isIdentifierBody(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
+}
+
+/// isHorizontalWhitespace - Return true if this character is horizontal
+/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'.
+static inline bool isHorizontalWhitespace(unsigned char c) {
+ return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
+}
+
+/// isWhitespace - Return true if this character is horizontal or vertical
+/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false
+/// for '\0'.
+static inline bool isWhitespace(unsigned char c) {
+ return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
+}
+
+/// isNumberBody - Return true if this is the body character of a
+/// preprocessing number, which is [a-zA-Z0-9_.].
+static inline bool isNumberBody(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
+ true : false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Diagnostics forwarding code.
+//===----------------------------------------------------------------------===//
+
+/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
+/// lexer buffer was all instantiated at a single point, perform the mapping.
+/// This is currently only used for _Pragma implementation, so it is the slow
+/// path of the hot getSourceLocation method. Do not allow it to be inlined.
+static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
+ SourceLocation FileLoc,
+ unsigned CharNo,
+ unsigned TokLen) DISABLE_INLINE;
+static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
+ SourceLocation FileLoc,
+ unsigned CharNo, unsigned TokLen) {
+ assert(FileLoc.isMacroID() && "Must be an instantiation");
+
+ // Otherwise, we're lexing "mapped tokens". This is used for things like
+ // _Pragma handling. Combine the instantiation location of FileLoc with the
+ // spelling location.
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a new SLoc which is expanded from Instantiation(FileLoc) but whose
+ // characters come from spelling(FileLoc)+Offset.
+ SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
+ SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);
+
+ // Figure out the expansion loc range, which is the range covered by the
+ // original _Pragma(...) sequence.
+ std::pair<SourceLocation,SourceLocation> II =
+ SM.getImmediateInstantiationRange(FileLoc);
+
+ return SM.createInstantiationLoc(SpellingLoc, II.first, II.second, TokLen);
+}
+
+/// getSourceLocation - Return a source location identifier for the specified
+/// offset in the current file.
+SourceLocation Lexer::getSourceLocation(const char *Loc,
+ unsigned TokLen) const {
+ assert(Loc >= BufferStart && Loc <= BufferEnd &&
+ "Location out of range for this buffer!");
+
+ // In the normal case, we're just lexing from a simple file buffer, return
+ // the file id from FileLoc with the offset specified.
+ unsigned CharNo = Loc-BufferStart;
+ if (FileLoc.isFileID())
+ return FileLoc.getFileLocWithOffset(CharNo);
+
+ // Otherwise, this is the _Pragma lexer case, which pretends that all of the
+ // tokens are lexed from where the _Pragma was defined.
+ assert(PP && "This doesn't work on raw lexers");
+ return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
+}
+
+/// Diag - Forwarding function for diagnostics.  This translates a source
+/// position in the current buffer into a SourceLocation object for rendering.
+DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
+ return PP->Diag(getSourceLocation(Loc), DiagID);
+}
+
+//===----------------------------------------------------------------------===//
+// Trigraph and Escaped Newline Handling Code.
+//===----------------------------------------------------------------------===//
+
+/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
+/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
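+/// For example, '(' maps to '[' (so "??(" decodes to '[') and '/' maps to
+/// '\', which can in turn introduce an escaped newline.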
+static char GetTrigraphCharForLetter(char Letter) {
+ switch (Letter) {
+ default: return 0;
+ case '=': return '#';
+ case ')': return ']';
+ case '(': return '[';
+ case '!': return '|';
+ case '\'': return '^';
+ case '>': return '}';
+ case '/': return '\\';
+ case '<': return '{';
+ case '-': return '~';
+ }
+}
+
+/// DecodeTrigraphChar - If the specified character is a legal trigraph when
+/// prefixed with ??, emit a warning about the trigraph use whether trigraphs
+/// are enabled or not, and return the decoded character if trigraphs are
+/// enabled (or 0 if they are disabled).
+static char DecodeTrigraphChar(const char *CP, Lexer *L) {
+ char Res = GetTrigraphCharForLetter(*CP);
+ if (!Res || !L) return Res;
+
+ if (!L->getFeatures().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_ignored);
+ return 0;
+ }
+
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_converted) << std::string()+Res;
+ return Res;
+}
+
+/// getEscapedNewLineSize - Return the size of the specified escaped newline,
+/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
+/// trigraph equivalent on entry to this function.
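+/// For example, if Ptr points at "   \n" (three spaces then a newline), this
+/// returns 4; if no newline follows the whitespace, it returns 0.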
+unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
+ unsigned Size = 0;
+ while (isWhitespace(Ptr[Size])) {
+ ++Size;
+
+ if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
+ continue;
+
+ // If this is a \r\n or \n\r, skip the other half.
+ if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
+ Ptr[Size-1] != Ptr[Size])
+ ++Size;
+
+ return Size;
+ }
+
+ // Not an escaped newline, must be a \t or something else.
+ return 0;
+}
+
+/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
+/// them), skip over them and return the first non-escaped-newline found,
+/// otherwise return P.
+const char *Lexer::SkipEscapedNewLines(const char *P) {
+ while (1) {
+ const char *AfterEscape;
+ if (*P == '\\') {
+ AfterEscape = P+1;
+ } else if (*P == '?') {
+ // If not a trigraph for escape, bail out.
+ if (P[1] != '?' || P[2] != '/')
+ return P;
+ AfterEscape = P+3;
+ } else {
+ return P;
+ }
+
+ unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
+ if (NewLineSize == 0) return P;
+ P = AfterEscape+NewLineSize;
+ }
+}
+
+
+/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
+/// get its size, and return it. This is tricky in several cases:
+/// 1. If currently at the start of a trigraph, we warn about the trigraph,
+/// then either return the trigraph (skipping 3 chars) or the '?',
+/// depending on whether trigraphs are enabled or not.
+/// 2. If this is an escaped newline (potentially with whitespace between
+/// the backslash and newline), implicitly skip the newline and return
+/// the char after it.
+/// 3. If this is a UCN, return it. FIXME: C++ UCN's?
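+/// For example, if Ptr points at a backslash followed directly by a newline
+/// and then '+', this skips the escaped newline, adds 3 to Size, and
+/// returns '+'.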
+///
+/// This handles the slow/uncommon case of the getCharAndSize method. Here we
+/// know that we can accumulate into Size, and that we have already incremented
+/// Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
+/// be updated to match.
+///
+char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
+ Token *Tok) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters followed by a newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ // Warn if there was whitespace between the backslash and newline.
+ if (EscapedNewLineSize != 1 && Tok && !isLexingRawMode())
+ Diag(Ptr, diag::backslash_newline_space);
+
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlow(Ptr, Size, Tok);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), emit
+ // a trigraph warning. If so, and if trigraphs are enabled, return it.
+ if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+
+/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
+/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
+/// and that we have already incremented Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
+/// be updated to match.
+char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &Features) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters followed by a newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlowNoWarn(Ptr, Size, Features);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (Features.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), return
+ // it.
+ if (char C = GetTrigraphCharForLetter(Ptr[2])) {
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper methods for lexing.
+//===----------------------------------------------------------------------===//
+
+void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
+ // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
+ unsigned Size;
+ unsigned char C = *CurPtr++;
+ while (isIdentifierBody(C)) {
+ C = *CurPtr++;
+ }
+ --CurPtr; // Back up over the skipped character.
+
+ // Fast path, no $,\,? in identifier found. '\' might be an escaped newline
+ // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
+ // FIXME: UCNs.
+ if (C != '\\' && C != '?' && (C != '$' || !Features.DollarIdents)) {
+FinishIdentifier:
+ const char *IdStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::identifier);
+
+ // If we are in raw mode, return this identifier raw. There is no need to
+ // look up identifier information or attempt to macro expand it.
+ if (LexingRawMode) return;
+
+ // Fill in Result.IdentifierInfo, looking up the identifier in the
+ // identifier table.
+ IdentifierInfo *II = PP->LookUpIdentifierInfo(Result, IdStart);
+
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ Result.setKind(II->getTokenID());
+
+ // Finally, now that we know we have an identifier, pass this off to the
+ // preprocessor, which may macro expand it or something.
+ if (II->isHandleIdentifierCase())
+ PP->HandleIdentifier(Result);
+ return;
+ }
+
+ // Otherwise, $,\,? in identifier found. Enter slower path.
+
+ C = getCharAndSize(CurPtr, Size);
+ while (1) {
+ if (C == '$') {
+ // If we hit a $ and they are not supported in identifiers, we are done.
+ if (!Features.DollarIdents) goto FinishIdentifier;
+
+ // Otherwise, emit a diagnostic and continue.
+ if (!isLexingRawMode())
+ Diag(CurPtr, diag::ext_dollar_in_identifier);
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ continue;
+ } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
+ // Found end of identifier.
+ goto FinishIdentifier;
+ }
+
+ // Otherwise, this character is good, consume it.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+
+ C = getCharAndSize(CurPtr, Size);
+ while (isIdentifierBody(C)) { // FIXME: UCNs.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ }
+ }
+}
+
+
+/// LexNumericConstant - Lex the remainder of an integer or floating point
+/// constant. From[-1] is the first character lexed. Return the end of the
+/// constant.
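+/// For example, all of "1e+12" is consumed as one numeric constant because
+/// the '+' follows an 'e'; in "1+12" the token stops before the '+'.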
+void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ char PrevCh = 0;
+ while (isNumberBody(C)) { // FIXME: UCNs?
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ PrevCh = C;
+ C = getCharAndSize(CurPtr, Size);
+ }
+
+ // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e'))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+
+ // If we have a hex FP constant, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
+ Result.setLiteralData(TokStart);
+}
+
+/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
+/// either " or L".
+void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) {
+ const char *NulCharacter = 0; // Does this string contain the \0 character?
+
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '"') {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ C = getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ Diag(BufferPtr, diag::err_unterminated_string);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_string);
+
+ // Update the location of the token as well as the BufferPtr instance var.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr,
+ Wide ? tok::wide_string_literal : tok::string_literal);
+ Result.setLiteralData(TokStart);
+}
+
+/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
+/// after having lexed the '<' character. This is used for #include filenames.
+void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
+ const char *NulCharacter = 0; // Does this string contain the \0 character?
+ const char *AfterLessPos = CurPtr;
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '>') {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ C = getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ // If the filename is unterminated, then it must just be a lone <
+ // character. Return this as such.
+ FormTokenWithChars(Result, AfterLessPos, tok::less);
+ return;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_string);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
+ Result.setLiteralData(TokStart);
+}
+
+
+/// LexCharConstant - Lex the remainder of a character constant, after having
+/// lexed either ' or L'.
+void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
+ const char *NulCharacter = 0; // Does this character contain the \0 character?
+
+ // Handle the common case of 'x' and '\y' efficiently.
+ char C = getAndAdvanceChar(CurPtr, Result);
+ if (C == '\'') {
+ if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ Diag(BufferPtr, diag::err_empty_character);
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return;
+ } else if (C == '\\') {
+ // Skip the escaped character.
+ // FIXME: UCN's.
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') {
+ ++CurPtr;
+ } else {
+ // Fall back on generic code for embedded nulls, newlines, wide chars.
+ do {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ C = getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !Features.AsmPreprocessor)
+ Diag(BufferPtr, diag::err_unterminated_char);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ } while (C != '\'');
+ }
+
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_char);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::char_constant);
+ Result.setLiteralData(TokStart);
+}
+
+/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
+/// Update BufferPtr to point to the next non-whitespace character and return.
+///
+/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
+///
+bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
+ // Whitespace - Skip it, then return the token after the whitespace.
+  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
+ while (1) {
+ // Skip horizontal whitespace very aggressively.
+ while (isHorizontalWhitespace(Char))
+ Char = *++CurPtr;
+
+ // Otherwise if we have something other than whitespace, we're done.
+ if (Char != '\n' && Char != '\r')
+ break;
+
+ if (ParsingPreprocessorDirective) {
+ // End of preprocessor directive line, let LexTokenInternal handle this.
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // ok, but handle newline.
+ // The returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+ Char = *++CurPtr;
+ }
+
+ // If this isn't immediately after a newline, there is leading space.
+ char PrevChar = CurPtr[-1];
+ if (PrevChar != '\n' && PrevChar != '\r')
+ Result.setFlag(Token::LeadingSpace);
+
+ // If the client wants us to return whitespace, return it now.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// SkipBCPLComment - We have just read the // characters from input.  Skip
+/// until we find the newline character that terminates the comment.  Then
+/// update BufferPtr and return.  If we're in KeepCommentMode, this will form
+/// the token and return true.
+bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
+ // If BCPL comments aren't explicitly enabled for this language, emit an
+ // extension warning.
+ if (!Features.BCPLComment && !isLexingRawMode()) {
+ Diag(BufferPtr, diag::ext_bcpl_comment);
+
+ // Mark them enabled so we only emit one warning for this translation
+ // unit.
+ Features.BCPLComment = true;
+ }
+
+ // Scan over the body of the comment. The common case, when scanning, is that
+ // the comment contains normal ascii characters with nothing interesting in
+ // them. As such, optimize for this case with the inner loop.
+ char C;
+ do {
+ C = *CurPtr;
+ // FIXME: Speedup BCPL comment lexing. Just scan for a \n or \r character.
+ // If we find a \n character, scan backwards, checking to see if it's an
+ // escaped newline, like we do for block comments.
+
+ // Skip over characters in the fast loop.
+ while (C != 0 && // Potentially EOF.
+ C != '\\' && // Potentially escaped newline.
+ C != '?' && // Potentially trigraph.
+ C != '\n' && C != '\r') // Newline or DOS-style newline.
+ C = *++CurPtr;
+
+ // If this is a newline, we're done.
+ if (C == '\n' || C == '\r')
+ break; // Found the newline? Break out!
+
+ // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
+ // properly decode the character. Read it in raw mode to avoid emitting
+ // diagnostics about things like trigraphs. If we see an escaped newline,
+ // we'll handle it below.
+ const char *OldPtr = CurPtr;
+ bool OldRawMode = isLexingRawMode();
+ LexingRawMode = true;
+ C = getAndAdvanceChar(CurPtr, Result);
+ LexingRawMode = OldRawMode;
+
+ // If the char that we finally got was a \n, then we must have had something
+ // like \<newline><newline>. We don't want to have consumed the second
+    // newline; we want CurPtr to end up pointing to it down below.
+ if (C == '\n' || C == '\r') {
+ --CurPtr;
+ C = 'x'; // doesn't matter what this is.
+ }
+
+ // If we read multiple characters, and one of those characters was a \r or
+ // \n, then we had an escaped newline within the comment. Emit diagnostic
+ // unless the next line is also a // comment.
+ if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
+ for (; OldPtr != CurPtr; ++OldPtr)
+ if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
+          // Okay, we found a // comment that ends in a newline.  If the next
+          // line is also a // comment (after whitespace), don't diagnose.
+ if (isspace(C)) {
+ const char *ForwardPtr = CurPtr;
+ while (isspace(*ForwardPtr)) // Skip whitespace.
+ ++ForwardPtr;
+ if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
+ break;
+ }
+
+ if (!isLexingRawMode())
+ Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
+ break;
+ }
+ }
+
+ if (CurPtr == BufferEnd+1) { --CurPtr; break; }
+ } while (C != '\n' && C != '\r');
+
+ // Found but did not consume the newline.
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode())
+ return SaveBCPLComment(Result, CurPtr);
+
+ // If we are inside a preprocessor directive and we see the end of line,
+ // return immediately, so that the lexer can return this as an EOM token.
+ if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Otherwise, eat the \n character. We don't care if this is a \n\r or
+ // \r\n sequence. This is an efficiency hack (because we know the \n can't
+ // contribute to another token), it isn't needed for correctness. Note that
+ // this is ok even in KeepWhitespaceMode, because we would have returned the
+  // comment above in that mode.
+ ++CurPtr;
+
+ // The next returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
+/// an appropriate way and return it.
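+/// For example, inside a macro definition the comment "// note" is repackaged
+/// as "/* note*/" so it can live safely in the macro's token stream.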
+bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
+ // If we're not in a preprocessor directive, just return the // comment
+ // directly.
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+
+ if (!ParsingPreprocessorDirective)
+ return true;
+
+ // If this BCPL-style comment is in a macro definition, transmogrify it into
+ // a C-style block comment.
+ std::string Spelling = PP->getSpelling(Result);
+ assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
+ Spelling[1] = '*'; // Change prefix to "/*".
+ Spelling += "*/"; // add suffix.
+
+ Result.setKind(tok::comment);
+ PP->CreateString(&Spelling[0], Spelling.size(), Result,
+ Result.getLocation());
+ return true;
+}
+
+/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified newline
+/// character (either \n or \r) is part of an escaped newline sequence. Issue a
+/// diagnostic if so. We know that the newline is inside of a block comment.
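+/// For example, a comment whose final '*' is followed by a backslash, a
+/// newline, and then the closing '/' is still treated as properly terminated.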
+static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
+ Lexer *L) {
+ assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
+
+ // Back up off the newline.
+ --CurPtr;
+
+ // If this is a two-character newline sequence, skip the other character.
+ if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
+ // \n\n or \r\r -> not escaped newline.
+ if (CurPtr[0] == CurPtr[1])
+ return false;
+ // \n\r or \r\n -> skip the newline.
+ --CurPtr;
+ }
+
+ // If we have horizontal whitespace, skip over it. We allow whitespace
+ // between the slash and newline.
+ bool HasSpace = false;
+ while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
+ --CurPtr;
+ HasSpace = true;
+ }
+
+ // If we have a slash, we know this is an escaped newline.
+ if (*CurPtr == '\\') {
+ if (CurPtr[-1] != '*') return false;
+ } else {
+ // It isn't a slash, is it the ?? / trigraph?
+ if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
+ CurPtr[-3] != '*')
+ return false;
+
+ // This is the trigraph ending the comment. Emit a stern warning!
+ CurPtr -= 2;
+
+ // If no trigraphs are enabled, warn that we ignored this trigraph and
+ // ignore this * character.
+ if (!L->getFeatures().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
+ return false;
+ }
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ends_block_comment);
+ }
+
+ // Warn about having an escaped newline between the */ characters.
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
+
+ // If there was space between the backslash and newline, warn about it.
+ if (HasSpace && !L->isLexingRawMode())
+ L->Diag(CurPtr, diag::backslash_newline_space);
+
+ return true;
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#elif __ALTIVEC__
+#include <altivec.h>
+#undef bool
+#endif
+
+/// SkipBlockComment - We have just read the /* characters from input. Read
+/// until we find the */ characters that terminate the comment. Note that we
+/// don't bother decoding trigraphs or escaped newlines in block comments,
+/// because they cannot cause the comment to end. The only thing that can
+/// happen is the comment could end with an escaped newline between the */ end
+/// of comment.
+///
+/// If KeepCommentMode is enabled, this forms a token from the comment and
+/// returns true.
+bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
+ // Scan one character past where we should, looking for a '/' character. Once
+  // we find it, check to see if it was preceded by a '*'.  This common
+ // optimization helps people who like to put a lot of * characters in their
+ // comments.
+
+  // Read the first character with newlines and trigraphs skipped, to handle
+ // the degenerate /*/ case below correctly if the * has an escaped newline
+ // after it.
+ unsigned CharSize;
+ unsigned char C = getCharAndSize(CurPtr, CharSize);
+ CurPtr += CharSize;
+ if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Check to see if the first character after the '/*' is another /. If so,
+ // then this slash does not end the block comment, it is part of it.
+ if (C == '/')
+ C = *CurPtr++;
+
+ while (1) {
+ // Skip over all non-interesting characters until we find end of buffer or a
+ // (probably ending) '/' character.
+ if (CurPtr + 24 < BufferEnd) {
+ // While not aligned to a 16-byte boundary.
+ while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
+ C = *CurPtr++;
+
+ if (C == '/') goto FoundSlash;
+
+#ifdef __SSE2__
+ __m128i Slashes = _mm_set_epi8('/', '/', '/', '/', '/', '/', '/', '/',
+ '/', '/', '/', '/', '/', '/', '/', '/');
+ while (CurPtr+16 <= BufferEnd &&
+ _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes)) == 0)
+ CurPtr += 16;
+#elif __ALTIVEC__
+ __vector unsigned char Slashes = {
+ '/', '/', '/', '/', '/', '/', '/', '/',
+ '/', '/', '/', '/', '/', '/', '/', '/'
+ };
+ while (CurPtr+16 <= BufferEnd &&
+ !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
+ CurPtr += 16;
+#else
+ // Scan for '/' quickly. Many block comments are very large.
+ while (CurPtr[0] != '/' &&
+ CurPtr[1] != '/' &&
+ CurPtr[2] != '/' &&
+ CurPtr[3] != '/' &&
+ CurPtr+4 < BufferEnd) {
+ CurPtr += 4;
+ }
+#endif
+
+ // It has to be one of the bytes scanned, increment to it and read one.
+ C = *CurPtr++;
+ }
+
+ // Loop to scan the remainder.
+ while (C != '/' && C != '\0')
+ C = *CurPtr++;
+
+ FoundSlash:
+ if (C == '/') {
+ if (CurPtr[-2] == '*') // We found the final */. We're done!
+ break;
+
+ if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
+ if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
+ // We found the final */, though it had an escaped newline between the
+ // * and /. We're done!
+ break;
+ }
+ }
+ if (CurPtr[0] == '*' && CurPtr[1] != '/') {
+ // If this is a /* inside of the comment, emit a warning. Don't do this
+ // if this is a /*/, which will end the comment. This misses cases with
+ // embedded escaped newlines, but oh well.
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::warn_nested_block_comment);
+ }
+ } else if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ // Note: the user probably forgot a */. We could continue immediately
+ // after the /*, but this would involve lexing a lot of what really is the
+ // comment, which surely would confuse the parser.
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ }
+ C = *CurPtr++;
+ }
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+ return true;
+ }
+
+ // It is common for the tokens immediately after a /**/ comment to be
+ // whitespace. Instead of going through the big switch, handle it
+ // efficiently now. This is safe even in KeepWhitespaceMode because we would
+ // have already returned above with the comment as a token.
+ if (isHorizontalWhitespace(*CurPtr)) {
+ Result.setFlag(Token::LeadingSpace);
+ SkipWhitespace(Result, CurPtr+1);
+ return false;
+ }
+
+ // Otherwise, just return so that the next character will be lexed as a token.
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Primary Lexing Entry Points
+//===----------------------------------------------------------------------===//
+
+/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
+/// uninterpreted string. This switches the lexer out of directive mode.
+std::string Lexer::ReadToEndOfLine() {
+ assert(ParsingPreprocessorDirective && ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+ std::string Result;
+ Token Tmp;
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+ while (1) {
+ char Char = getAndAdvanceChar(CurPtr, Tmp);
+ switch (Char) {
+ default:
+ Result += Char;
+ break;
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 != BufferEnd) {
+ // Nope, normal character, continue.
+ Result += Char;
+ break;
+ }
+ // FALL THROUGH.
+ case '\r':
+ case '\n':
+ // Okay, we found the end of the line. First, back up past the \0, \r, \n.
+ assert(CurPtr[-1] == Char && "Trigraphs for newline?");
+ BufferPtr = CurPtr-1;
+
+ // Next, lex the character, which should handle the EOM transition.
+ Lex(Tmp);
+ assert(Tmp.is(tok::eom) && "Unexpected token!");
+
+ // Finally, we're done, return the string we found.
+ return Result;
+ }
+ }
+}
+
+/// LexEndOfFile - CurPtr points to the end of this file. Handle this
+/// condition, reporting diagnostics and handling other edge cases as required.
+/// This returns true if Result contains a token, false if PP.Lex should be
+/// called again.
+bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, tok::eom);
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ SetCommentRetentionState(PP->getCommentRetentionState());
+ return true; // Have a token.
+ }
+
+ // If we are in raw mode, return this event as an EOF token. Let the caller
+ // that put us in raw mode handle the event.
+ if (isLexingRawMode()) {
+ Result.startToken();
+ BufferPtr = BufferEnd;
+ FormTokenWithChars(Result, BufferEnd, tok::eof);
+ return true;
+ }
+
+ // Otherwise, issue diagnostics for unterminated #if and missing newline.
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
+ // a pedwarn.
+ if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
+ Diag(BufferEnd, diag::ext_no_newline_eof)
+ << CodeModificationHint::CreateInsertion(getSourceLocation(BufferEnd),
+ "\n");
+
+ BufferPtr = CurPtr;
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result);
+}
+
+/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
+/// the specified lexer will return a tok::l_paren token, 0 if it is something
+/// else and 2 if there are no more tokens in the buffer controlled by the
+/// lexer.
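+/// (This is presumably used by the preprocessor when deciding whether a
+/// function-like macro name is actually followed by '(' and should expand.)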
+unsigned Lexer::isNextPPTokenLParen() {
+ assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
+
+ // Switch to 'skipping' mode. This will ensure that we can lex a token
+ // without emitting diagnostics, disables macro expansion, and will cause EOF
+ // to return an EOF token instead of popping the include stack.
+ LexingRawMode = true;
+
+ // Save state that can be changed while lexing so that we can restore it.
+ const char *TmpBufferPtr = BufferPtr;
+ bool inPPDirectiveMode = ParsingPreprocessorDirective;
+
+ Token Tok;
+ Tok.startToken();
+ LexTokenInternal(Tok);
+
+ // Restore state that may have changed.
+ BufferPtr = TmpBufferPtr;
+ ParsingPreprocessorDirective = inPPDirectiveMode;
+
+ // Restore the lexer back to non-skipping mode.
+ LexingRawMode = false;
+
+ if (Tok.is(tok::eof))
+ return 2;
+ return Tok.is(tok::l_paren);
+}
+
+
+/// LexTokenInternal - This implements a simple C family lexer.  It is an
+/// extremely performance critical piece of code.  This assumes that the
+/// buffer has a null character at the end of the file.  It returns a
+/// preprocessing token, not a normal token; as such, it is an internal
+/// interface.  It assumes that the Flags of Result have been cleared before
+/// calling this.
+void Lexer::LexTokenInternal(Token &Result) {
+LexNextToken:
+ // New token, can't need cleaning yet.
+ Result.clearFlag(Token::NeedsCleaning);
+ Result.setIdentifierInfo(0);
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+
+  // Small amounts of horizontal whitespace are very common between tokens.
+ if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
+ ++CurPtr;
+ while ((*CurPtr == ' ') || (*CurPtr == '\t'))
+ ++CurPtr;
+
+ // If we are keeping whitespace and other tokens, just return what we just
+ // skipped. The next lexer invocation will return the token after the
+ // whitespace.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return;
+ }
+
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ }
+
+ unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
+
+ // Read a character, advancing over it.
+ char Char = getAndAdvanceChar(CurPtr, Result);
+ tok::TokenKind Kind;
+
+ switch (Char) {
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 == BufferEnd) {
+ // Read the PP instance variable into an automatic variable, because
+ // LexEndOfFile will often delete 'this'.
+ Preprocessor *PPCache = PP;
+ if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file.
+ return; // Got a token to return.
+ assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
+ return PPCache->Lex(Result);
+ }
+
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::null_in_file);
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ case '\n':
+ case '\r':
+ // If we are inside a preprocessor directive and we see the end of line,
+ // we know we are done with the directive, so return an EOM token.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ SetCommentRetentionState(PP->getCommentRetentionState());
+
+ // Since we consumed a newline, we are back at the start of a line.
+ IsAtStartOfLine = true;
+
+ Kind = tok::eom;
+ break;
+ }
+ // The returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ SkipHorizontalWhitespace:
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+
+ SkipIgnoredUnits:
+ CurPtr = BufferPtr;
+
+ // If the next token is obviously a // or /* */ comment, skip it efficiently
+ // too (without going through the big switch stmt).
+ if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
+ Features.BCPLComment) {
+ SkipBCPLComment(Result, CurPtr+2);
+ goto SkipIgnoredUnits;
+ } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
+ SkipBlockComment(Result, CurPtr+2);
+ goto SkipIgnoredUnits;
+ } else if (isHorizontalWhitespace(*CurPtr)) {
+ goto SkipHorizontalWhitespace;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+
+ // C99 6.4.4.1: Integer Constants.
+ // C99 6.4.4.2: Floating Constants.
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexNumericConstant(Result, CurPtr);
+
+ case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // Wide string literal.
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ true);
+
+ // Wide character constant.
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
+ // FALL THROUGH, treating L like the start of an identifier.
+
+ // C99 6.4.2: Identifiers.
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
+ case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
+ case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U':
+ case 'V': case 'W': case 'X': case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
+ case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
+ case 'o': case 'p': case 'q': case 'r': case 's': case 't': case 'u':
+ case 'v': case 'w': case 'x': case 'y': case 'z':
+ case '_':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+
+ case '$': // $ in identifiers.
+ if (Features.DollarIdents) {
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::ext_dollar_in_identifier);
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+ }
+
+ Kind = tok::unknown;
+ break;
+
+ // C99 6.4.4: Character Constants.
+ case '\'':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexCharConstant(Result, CurPtr);
+
+ // C99 6.4.5: String Literals.
+ case '"':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexStringLiteral(Result, CurPtr, false);
+
+ // C99 6.4.6: Punctuators.
+ case '?':
+ Kind = tok::question;
+ break;
+ case '[':
+ Kind = tok::l_square;
+ break;
+ case ']':
+ Kind = tok::r_square;
+ break;
+ case '(':
+ Kind = tok::l_paren;
+ break;
+ case ')':
+ Kind = tok::r_paren;
+ break;
+ case '{':
+ Kind = tok::l_brace;
+ break;
+ case '}':
+ Kind = tok::r_brace;
+ break;
+ case '.':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char >= '0' && Char <= '9') {
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
+ } else if (Features.CPlusPlus && Char == '*') {
+ Kind = tok::periodstar;
+ CurPtr += SizeTmp;
+ } else if (Char == '.' &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
+ Kind = tok::ellipsis;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ Kind = tok::period;
+ }
+ break;
+ case '&':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '&') {
+ Kind = tok::ampamp;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '=') {
+ Kind = tok::ampequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::amp;
+ }
+ break;
+ case '*':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::starequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::star;
+ }
+ break;
+ case '+':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '+') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusplus;
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusequal;
+ } else {
+ Kind = tok::plus;
+ }
+ break;
+ case '-':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '-') { // --
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusminus;
+ } else if (Char == '>' && Features.CPlusPlus &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::arrowstar;
+ } else if (Char == '>') { // ->
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::arrow;
+ } else if (Char == '=') { // -=
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusequal;
+ } else {
+ Kind = tok::minus;
+ }
+ break;
+ case '~':
+ Kind = tok::tilde;
+ break;
+ case '!':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::exclaimequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::exclaim;
+ }
+ break;
+ case '/':
+ // 6.4.9: Comments
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '/') { // BCPL comment.
+ // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
+      // want to lex this as a comment.  There is one problem with this, though:
+      // in one particular corner case, it can change the behavior of the
+      // resultant program.  For example, in "foo //**/ bar", C89 would lex
+      // this as "foo / bar" and languages with BCPL comments would lex it as
+ // "foo". Check to see if the character after the second slash is a '*'.
+ // If so, we will lex that as a "/" instead of the start of a comment.
+ if (Features.BCPLComment ||
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') {
+ if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
+ return; // KeepCommentMode
+
+ // It is common for the tokens immediately after a // comment to be
+ // whitespace (indentation for the next line). Instead of going through
+ // the big switch, handle it efficiently now.
+ goto SkipIgnoredUnits;
+ }
+ }
+
+ if (Char == '*') { // /**/ comment.
+ if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
+ return; // KeepCommentMode
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::slashequal;
+ } else {
+ Kind = tok::slash;
+ }
+ break;
+ case '%':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::percentequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Features.Digraphs && Char == '>') {
+ Kind = tok::r_brace; // '%>' -> '}'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Features.Digraphs && Char == ':') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
+ Kind = tok::hashhash; // '%:%:' -> '##'
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (Char == '@' && Features.Microsoft) { // %:@ -> #@ -> Charize
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::charize_microsoft_ext);
+ Kind = tok::hashat;
+ } else { // '%:' -> '#'
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // FIXME: -fpreprocessed mode??
+ if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
+ FormTokenWithChars(Result, CurPtr, tok::hash);
+ PP->HandleDirective(Result);
+
+ // As an optimization, if the preprocessor didn't switch lexers, tail
+ // recurse.
+ if (PP->isCurrentLexer(this)) {
+ // Start a new token. If this is a #include or something, the PP may
+ // want us starting at the beginning of the line again. If so, set
+ // the StartOfLine flag.
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ IsAtStartOfLine = false;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+
+ return PP->Lex(Result);
+ }
+
+ Kind = tok::hash;
+ }
+ } else {
+ Kind = tok::percent;
+ }
+ break;
+ case '<':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (ParsingFilename) {
+ return LexAngledStringLiteral(Result, CurPtr);
+ } else if (Char == '<' &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
+ Kind = tok::lesslessequal;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (Char == '<') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessless;
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessequal;
+ } else if (Features.Digraphs && Char == ':') { // '<:' -> '['
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_square;
+ } else if (Features.Digraphs && Char == '%') { // '<%' -> '{'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_brace;
+ } else {
+ Kind = tok::less;
+ }
+ break;
+ case '>':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greaterequal;
+ } else if (Char == '>' &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '=') {
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::greatergreaterequal;
+ } else if (Char == '>') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greatergreater;
+ } else {
+ Kind = tok::greater;
+ }
+ break;
+ case '^':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::caretequal;
+ } else {
+ Kind = tok::caret;
+ }
+ break;
+ case '|':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::pipeequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '|') {
+ Kind = tok::pipepipe;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::pipe;
+ }
+ break;
+ case ':':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Features.Digraphs && Char == '>') {
+ Kind = tok::r_square; // ':>' -> ']'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Features.CPlusPlus && Char == ':') {
+ Kind = tok::coloncolon;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::colon;
+ }
+ break;
+ case ';':
+ Kind = tok::semi;
+ break;
+ case '=':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::equalequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::equal;
+ }
+ break;
+ case ',':
+ Kind = tok::comma;
+ break;
+ case '#':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '#') {
+ Kind = tok::hashhash;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '@' && Features.Microsoft) { // #@ -> Charize
+ Kind = tok::hashat;
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::charize_microsoft_ext);
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // FIXME: -fpreprocessed mode??
+ if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
+ FormTokenWithChars(Result, CurPtr, tok::hash);
+ PP->HandleDirective(Result);
+
+ // As an optimization, if the preprocessor didn't switch lexers, tail
+ // recurse.
+ if (PP->isCurrentLexer(this)) {
+ // Start a new token. If this is a #include or something, the PP may
+ // want us starting at the beginning of the line again. If so, set
+ // the StartOfLine flag.
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ IsAtStartOfLine = false;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+ return PP->Lex(Result);
+ }
+
+ Kind = tok::hash;
+ }
+ break;
+
+ case '@':
+ // Objective C support.
+ if (CurPtr[-1] == '@' && Features.ObjC1)
+ Kind = tok::at;
+ else
+ Kind = tok::unknown;
+ break;
+
+ case '\\':
+ // FIXME: UCN's.
+ // FALL THROUGH.
+ default:
+ Kind = tok::unknown;
+ break;
+ }
+
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, Kind);
+}
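
The switch above recognizes multi-character punctuators by peeking with getCharAndSize and only consuming once a longer token is confirmed (maximal munch); the real code must go through getCharAndSize/ConsumeChar because trigraphs and escaped newlines can hide inside a token. As a rough standalone sketch of the same decision structure for the '-' family only (my own simplification, not Clang's interface, assuming a plain NUL-terminated buffer with no trigraphs and ignoring the C++-only gating of '->*'):

    #include <cstddef>

    // Hypothetical token kinds for illustration; the real set lives in
    // clang/Basic/TokenKinds.def.
    enum class PunctKind { Minus, MinusMinus, MinusEqual, Arrow, ArrowStar };

    // Return the longest punctuator starting at p (p[0] == '-') and how many
    // characters it consumed.
    static PunctKind LexMinusFamily(const char *p, std::size_t &len) {
      if (p[1] == '-')                { len = 2; return PunctKind::MinusMinus; }
      if (p[1] == '=')                { len = 2; return PunctKind::MinusEqual; }
      if (p[1] == '>' && p[2] == '*') { len = 3; return PunctKind::ArrowStar; } // C++ ->*
      if (p[1] == '>')                { len = 2; return PunctKind::Arrow; }
      len = 1;
      return PunctKind::Minus;
    }
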
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
new file mode 100644
index 0000000..0324c0b
--- /dev/null
+++ b/lib/Lex/LiteralSupport.cpp
@@ -0,0 +1,929 @@
+//===--- LiteralSupport.cpp - Code to parse and process literals ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the NumericLiteralParser, CharLiteralParser, and
+// StringLiteralParser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+
+/// HexDigitValue - Return the value of the specified hex digit, or -1 if it's
+/// not valid.
+static int HexDigitValue(char C) {
+ if (C >= '0' && C <= '9') return C-'0';
+ if (C >= 'a' && C <= 'f') return C-'a'+10;
+ if (C >= 'A' && C <= 'F') return C-'A'+10;
+ return -1;
+}
+
+/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
+/// either a character or a string literal.
+static unsigned ProcessCharEscape(const char *&ThisTokBuf,
+ const char *ThisTokEnd, bool &HadError,
+ SourceLocation Loc, bool IsWide,
+ Preprocessor &PP) {
+ // Skip the '\' char.
+ ++ThisTokBuf;
+
+ // We know that this character can't be off the end of the buffer, because
+ // that would have been \", which would not have been the end of string.
+ unsigned ResultChar = *ThisTokBuf++;
+ switch (ResultChar) {
+ // These map to themselves.
+ case '\\': case '\'': case '"': case '?': break;
+
+ // These have fixed mappings.
+ case 'a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ ResultChar = 7;
+ break;
+ case 'b':
+ ResultChar = 8;
+ break;
+ case 'e':
+ PP.Diag(Loc, diag::ext_nonstandard_escape) << "e";
+ ResultChar = 27;
+ break;
+ case 'f':
+ ResultChar = 12;
+ break;
+ case 'n':
+ ResultChar = 10;
+ break;
+ case 'r':
+ ResultChar = 13;
+ break;
+ case 't':
+ ResultChar = 9;
+ break;
+ case 'v':
+ ResultChar = 11;
+ break;
+ case 'x': { // Hex escape.
+ ResultChar = 0;
+ if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
+ PP.Diag(Loc, diag::err_hex_escape_no_digits);
+ HadError = 1;
+ break;
+ }
+
+ // Hex escapes are a maximal series of hex digits.
+ bool Overflow = false;
+ for (; ThisTokBuf != ThisTokEnd; ++ThisTokBuf) {
+ int CharVal = HexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ // About to shift out a digit?
+ Overflow |= (ResultChar & 0xF0000000) ? true : false;
+ ResultChar <<= 4;
+ ResultChar |= CharVal;
+ }
+
+ // See if any bits will be truncated when evaluated as a character.
+ unsigned CharWidth = PP.getTargetInfo().getCharWidth(IsWide);
+
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ Overflow = true;
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+
+ // Check for overflow.
+ if (Overflow) // Too many digits to fit in
+ PP.Diag(Loc, diag::warn_hex_escape_too_large);
+ break;
+ }
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7': {
+ // Octal escapes.
+ --ThisTokBuf;
+ ResultChar = 0;
+
+ // Octal escapes are a series of octal digits with maximum length 3.
+ // "\0123" is a three-digit octal escape "\012" followed by the character '3'.
+ unsigned NumDigits = 0;
+ do {
+ ResultChar <<= 3;
+ ResultChar |= *ThisTokBuf++ - '0';
+ ++NumDigits;
+ } while (ThisTokBuf != ThisTokEnd && NumDigits < 3 &&
+ ThisTokBuf[0] >= '0' && ThisTokBuf[0] <= '7');
+
+ // Check for overflow. Reject '\777', but not L'\777'.
+ unsigned CharWidth = PP.getTargetInfo().getCharWidth(IsWide);
+
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ PP.Diag(Loc, diag::warn_octal_escape_too_large);
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+ break;
+ }
+
+ // Otherwise, these are not valid escapes.
+ case '(': case '{': case '[': case '%':
+ // GCC accepts these as extensions. We warn about them as such though.
+ PP.Diag(Loc, diag::ext_nonstandard_escape)
+ << std::string()+(char)ResultChar;
+ break;
+ // FALL THROUGH.
+ default:
+ if (isgraph(ThisTokBuf[0]))
+ PP.Diag(Loc, diag::ext_unknown_escape) << std::string()+(char)ResultChar;
+ else
+ PP.Diag(Loc, diag::ext_unknown_escape) << "x"+llvm::utohexstr(ResultChar);
+ break;
+ }
+
+ return ResultChar;
+}
+
+/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
+/// convert the UTF32 to UTF8. This is a subroutine of StringLiteralParser.
+/// When we decide to implement UCN's for character constants and identifiers,
+/// we will likely rework our support for UCN's.
+static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd,
+ char *&ResultBuf, bool &HadError,
+ SourceLocation Loc, bool IsWide, Preprocessor &PP)
+{
+ // FIXME: Add a warning - UCN's are only valid in C++ & C99.
+ // FIXME: Handle wide strings.
+
+ // Save the beginning of the string (for error diagnostics).
+ const char *ThisTokBegin = ThisTokBuf;
+
+ // Skip the '\u' char's.
+ ThisTokBuf += 2;
+
+ if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
+ PP.Diag(Loc, diag::err_ucn_escape_no_digits);
+ HadError = 1;
+ return;
+ }
+ typedef uint32_t UTF32;
+
+ UTF32 UcnVal = 0;
+ unsigned short UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
+ for (; ThisTokBuf != ThisTokEnd && UcnLen; ++ThisTokBuf, UcnLen--) {
+ int CharVal = HexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ UcnVal <<= 4;
+ UcnVal |= CharVal;
+ }
+ // If we didn't consume the proper number of digits, there is a problem.
+ if (UcnLen) {
+ PP.Diag(PP.AdvanceToTokenCharacter(Loc, ThisTokBuf-ThisTokBegin),
+ diag::err_ucn_escape_incomplete);
+ HadError = 1;
+ return;
+ }
+ // Check UCN constraints (C99 6.4.3p2).
+ if ((UcnVal < 0xa0 &&
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60 )) // $, @, `
+ || (UcnVal >= 0xD800 && UcnVal <= 0xDFFF)
+ || (UcnVal > 0x10FFFF)) /* the maximum legal UTF32 value */ {
+ PP.Diag(Loc, diag::err_ucn_escape_invalid);
+ HadError = 1;
+ return;
+ }
+ // Now that we've parsed/checked the UCN, we convert from UTF32->UTF8.
+ // The conversion below was inspired by:
+ // http://www.unicode.org/Public/PROGRAMS/CVTUTF/ConvertUTF.c
+ // First, we determine how many bytes the result will require.
+ typedef uint8_t UTF8;
+
+ unsigned short bytesToWrite = 0;
+ if (UcnVal < (UTF32)0x80)
+ bytesToWrite = 1;
+ else if (UcnVal < (UTF32)0x800)
+ bytesToWrite = 2;
+ else if (UcnVal < (UTF32)0x10000)
+ bytesToWrite = 3;
+ else
+ bytesToWrite = 4;
+
+ const unsigned byteMask = 0xBF;
+ const unsigned byteMark = 0x80;
+
+ // Once the bits are split out into bytes of UTF8, this is a mask OR-ed
+ // into the first byte, depending on how many bytes follow.
+ static const UTF8 firstByteMark[5] = {
+ 0x00, 0x00, 0xC0, 0xE0, 0xF0
+ };
+ // Finally, we write the bytes into ResultBuf.
+ ResultBuf += bytesToWrite;
+ switch (bytesToWrite) { // note: everything falls through.
+ case 4: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 3: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 2: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 1: *--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
+ }
+ // Update the buffer.
+ ResultBuf += bytesToWrite;
+}
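
The byte-count selection and the reversed fill with byteMark/byteMask/firstByteMark above implement standard UTF-8 encoding. The same encoding written front to back, as a self-contained sketch of the scheme rather than the routine above (assuming the code point has already been range-checked):

    #include <cstdint>
    #include <string>

    // Encode one already-validated Unicode code point as UTF-8.
    static void EncodeUTF8(uint32_t Cp, std::string &Out) {
      if (Cp < 0x80) {                        // 1 byte:  0xxxxxxx
        Out += static_cast<char>(Cp);
      } else if (Cp < 0x800) {                // 2 bytes: 110xxxxx 10xxxxxx
        Out += static_cast<char>(0xC0 | (Cp >> 6));
        Out += static_cast<char>(0x80 | (Cp & 0x3F));
      } else if (Cp < 0x10000) {              // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
        Out += static_cast<char>(0xE0 | (Cp >> 12));
        Out += static_cast<char>(0x80 | ((Cp >> 6) & 0x3F));
        Out += static_cast<char>(0x80 | (Cp & 0x3F));
      } else {                                // 4 bytes: 11110xxx then three 10xxxxxx
        Out += static_cast<char>(0xF0 | (Cp >> 18));
        Out += static_cast<char>(0x80 | ((Cp >> 12) & 0x3F));
        Out += static_cast<char>(0x80 | ((Cp >> 6) & 0x3F));
        Out += static_cast<char>(0x80 | (Cp & 0x3F));
      }
    }

The 0xC0/0xE0/0xF0 constants are exactly the firstByteMark entries used above, and 0x80 plays the role of byteMark.
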
+
+
+/// integer-constant: [C99 6.4.4.1]
+/// decimal-constant integer-suffix
+/// octal-constant integer-suffix
+/// hexadecimal-constant integer-suffix
+/// decimal-constant:
+/// nonzero-digit
+/// decimal-constant digit
+/// octal-constant:
+/// 0
+/// octal-constant octal-digit
+/// hexadecimal-constant:
+/// hexadecimal-prefix hexadecimal-digit
+/// hexadecimal-constant hexadecimal-digit
+/// hexadecimal-prefix: one of
+/// 0x 0X
+/// integer-suffix:
+/// unsigned-suffix [long-suffix]
+/// unsigned-suffix [long-long-suffix]
+/// long-suffix [unsigned-suffix]
+/// long-long-suffix [unsigned-suffix]
+/// nonzero-digit:
+/// 1 2 3 4 5 6 7 8 9
+/// octal-digit:
+/// 0 1 2 3 4 5 6 7
+/// hexadecimal-digit:
+/// 0 1 2 3 4 5 6 7 8 9
+/// a b c d e f
+/// A B C D E F
+/// unsigned-suffix: one of
+/// u U
+/// long-suffix: one of
+/// l L
+/// long-long-suffix: one of
+/// ll LL
+///
+/// floating-constant: [C99 6.4.4.2]
+/// TODO: add rules...
+///
+NumericLiteralParser::
+NumericLiteralParser(const char *begin, const char *end,
+ SourceLocation TokLoc, Preprocessor &pp)
+ : PP(pp), ThisTokBegin(begin), ThisTokEnd(end) {
+
+ // This routine assumes that the range begin/end matches the regex for integer
+ // and FP constants (specifically, the 'pp-number' regex), and assumes that
+ // the byte at "*end" is both valid and not part of the regex. Because of
+ // this, it doesn't have to check for 'overscan' in various places.
+ assert(!isalnum(*end) && *end != '.' && *end != '_' &&
+ "Lexer didn't maximally munch?");
+
+ s = DigitsBegin = begin;
+ saw_exponent = false;
+ saw_period = false;
+ isLong = false;
+ isUnsigned = false;
+ isLongLong = false;
+ isFloat = false;
+ isImaginary = false;
+ hadError = false;
+
+ if (*s == '0') { // parse radix
+ ParseNumberStartingWithZero(TokLoc);
+ if (hadError)
+ return;
+ } else { // the first digit is non-zero
+ radix = 10;
+ s = SkipDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isxdigit(*s) && !(*s == 'e' || *s == 'E')) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ diag::err_invalid_decimal_digit) << std::string(s, s+1);
+ hadError = true;
+ return;
+ } else if (*s == '.') {
+ s++;
+ saw_period = true;
+ s = SkipDigits(s);
+ }
+ if ((*s == 'e' || *s == 'E')) { // exponent
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-begin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+ }
+
+ SuffixBegin = s;
+
+ // Parse the suffix. At this point we can classify whether we have an FP or
+ // integer constant.
+ bool isFPConstant = isFloatingLiteral();
+
+ // Loop over all of the characters of the suffix. If we see something bad,
+ // we break out of the loop.
+ for (; s != ThisTokEnd; ++s) {
+ switch (*s) {
+ case 'f': // FP Suffix for "float"
+ case 'F':
+ if (!isFPConstant) break; // Error for integer constant.
+ if (isFloat || isLong) break; // FF, LF invalid.
+ isFloat = true;
+ continue; // Success.
+ case 'u':
+ case 'U':
+ if (isFPConstant) break; // Error for floating constant.
+ if (isUnsigned) break; // Cannot be repeated.
+ isUnsigned = true;
+ continue; // Success.
+ case 'l':
+ case 'L':
+ if (isLong || isLongLong) break; // Cannot be repeated.
+ if (isFloat) break; // LF invalid.
+
+ // Check for long long. The L's need to be adjacent and the same case.
+ if (s+1 != ThisTokEnd && s[1] == s[0]) {
+ if (isFPConstant) break; // long long invalid for floats.
+ isLongLong = true;
+ ++s; // Eat both of them.
+ } else {
+ isLong = true;
+ }
+ continue; // Success.
+ case 'i':
+ if (PP.getLangOptions().Microsoft) {
+ // Allow i8, i16, i32, i64, and i128.
+ if (++s == ThisTokEnd) break;
+ switch (*s) {
+ case '8':
+ s++; // i8 suffix
+ break;
+ case '1':
+ if (++s == ThisTokEnd) break;
+ if (*s == '6') s++; // i16 suffix
+ else if (*s == '2') {
+ if (++s == ThisTokEnd) break;
+ if (*s == '8') s++; // i128 suffix
+ }
+ break;
+ case '3':
+ if (++s == ThisTokEnd) break;
+ if (*s == '2') s++; // i32 suffix
+ break;
+ case '6':
+ if (++s == ThisTokEnd) break;
+ if (*s == '4') s++; // i64 suffix
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ // fall through.
+ case 'I':
+ case 'j':
+ case 'J':
+ if (isImaginary) break; // Cannot be repeated.
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ diag::ext_imaginary_constant);
+ isImaginary = true;
+ continue; // Success.
+ }
+ // If we reached here, there was an error.
+ break;
+ }
+
+ // Report an error if there are any.
+ if (s != ThisTokEnd) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ isFPConstant ? diag::err_invalid_suffix_float_constant :
+ diag::err_invalid_suffix_integer_constant)
+ << std::string(SuffixBegin, ThisTokEnd);
+ hadError = true;
+ return;
+ }
+}
+
+/// ParseNumberStartingWithZero - This method is called when the first character
+/// of the number is found to be a zero. This means it is either an octal
+/// number (like '04'), a hex number ('0x123a'), a binary number ('0b1010'), or
+/// a floating point number (01239.123e4). Eat the prefix, determining the
+/// radix etc.
+void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
+ assert(s[0] == '0' && "Invalid method call");
+ s++;
+
+ // Handle a hex number like 0x1234.
+ if ((*s == 'x' || *s == 'X') && (isxdigit(s[1]) || s[1] == '.')) {
+ s++;
+ radix = 16;
+ DigitsBegin = s;
+ s = SkipHexDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (*s == '.') {
+ s++;
+ saw_period = true;
+ s = SkipHexDigits(s);
+ }
+ // A binary exponent can appear with or without a '.'. If dotted, the
+ // binary exponent is required.
+ if (*s == 'p' || *s == 'P') {
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit == s) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ s = first_non_digit;
+
+ if (!PP.getLangOptions().HexFloats)
+ PP.Diag(TokLoc, diag::ext_hexconstant_invalid);
+ } else if (saw_period) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_hexconstant_requires_exponent);
+ hadError = true;
+ }
+ return;
+ }
+
+ // Handle simple binary numbers 0b01010
+ if (*s == 'b' || *s == 'B') {
+ // 0b101010 is a GCC extension.
+ PP.Diag(TokLoc, diag::ext_binary_literal);
+ ++s;
+ radix = 2;
+ DigitsBegin = s;
+ s = SkipBinaryDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isxdigit(*s)) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_binary_digit) << std::string(s, s+1);
+ hadError = true;
+ }
+ // Other suffixes will be diagnosed by the caller.
+ return;
+ }
+
+ // For now, the radix is set to 8. If we discover that we have a
+ // floating point constant, the radix will change to 10. Octal floating
+ // point constants are not permitted (only decimal and hexadecimal).
+ radix = 8;
+ DigitsBegin = s;
+ s = SkipOctalDigits(s);
+ if (s == ThisTokEnd)
+ return; // Done, simple octal number like 01234
+
+ // If we have some other non-octal digit that *is* a decimal digit, see if
+ // this is part of a floating point number like 094.123 or 09e1.
+ if (isdigit(*s)) {
+ const char *EndDecimal = SkipDigits(s);
+ if (EndDecimal[0] == '.' || EndDecimal[0] == 'e' || EndDecimal[0] == 'E') {
+ s = EndDecimal;
+ radix = 10;
+ }
+ }
+
+ // If we have a hex digit other than 'e' (which denotes a FP exponent) then
+ // the code is using an incorrect base.
+ if (isxdigit(*s) && *s != 'e' && *s != 'E') {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_octal_digit) << std::string(s, s+1);
+ hadError = true;
+ return;
+ }
+
+ if (*s == '.') {
+ s++;
+ radix = 10;
+ saw_period = true;
+ s = SkipDigits(s); // Skip fractional digits.
+ }
+ if (*s == 'e' || *s == 'E') { // exponent
+ const char *Exponent = s;
+ s++;
+ radix = 10;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+}
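
The prefix handling above reduces to a small decision tree: '0x'/'0X' followed by a hex digit or '.' means base 16, '0b'/'0B' is the GCC binary-literal extension, and anything else starting with '0' is treated as octal until a later '.' or exponent reclassifies it as decimal floating point. A compressed illustration of just the prefix step (a hypothetical helper, not the parser's own API; the later FP reclassification is omitted):

    #include <cctype>

    // Classify the radix of a pp-number that begins with '0'; p points at the '0'.
    static unsigned ClassifyZeroPrefixedRadix(const char *p) {
      if ((p[1] == 'x' || p[1] == 'X') &&
          (isxdigit((unsigned char)p[2]) || p[2] == '.'))
        return 16;                  // 0x1234 or 0x.8p1
      if (p[1] == 'b' || p[1] == 'B')
        return 2;                   // 0b1010 (GCC extension)
      return 8;                     // 01234; may become 10 for 09.5 or 09e1
    }
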
+
+
+/// GetIntegerValue - Convert this numeric literal value to an APInt that
+/// matches Val's input width. If there is an overflow, set Val to the low bits
+/// of the result and return true. Otherwise, return false.
+bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
+ // Fast path: Compute a conservative bound on the maximum number of
+ // bits per digit in this radix. If we can't possibly overflow a
+ // uint64 based on that bound then do the simple conversion to
+ // integer. This avoids the expensive overflow checking below, and
+ // handles the common cases that matter (small decimal integers and
+ // hex/octal values which don't overflow).
+ unsigned MaxBitsPerDigit = 1;
+ while ((1U << MaxBitsPerDigit) < radix)
+ MaxBitsPerDigit += 1;
+ if ((SuffixBegin - DigitsBegin) * MaxBitsPerDigit <= 64) {
+ uint64_t N = 0;
+ for (s = DigitsBegin; s != SuffixBegin; ++s)
+ N = N*radix + HexDigitValue(*s);
+
+ // This will truncate the value to Val's input width. Simply check
+ // for overflow by comparing.
+ Val = N;
+ return Val.getZExtValue() != N;
+ }
+
+ Val = 0;
+ s = DigitsBegin;
+
+ llvm::APInt RadixVal(Val.getBitWidth(), radix);
+ llvm::APInt CharVal(Val.getBitWidth(), 0);
+ llvm::APInt OldVal = Val;
+
+ bool OverflowOccurred = false;
+ while (s < SuffixBegin) {
+ unsigned C = HexDigitValue(*s++);
+
+ // If this letter is out of bound for this radix, reject it.
+ assert(C < radix && "NumericLiteralParser ctor should have rejected this");
+
+ CharVal = C;
+
+ // Add the digit to the value in the appropriate radix. If adding in digits
+ // made the value smaller, then this overflowed.
+ OldVal = Val;
+
+ // Multiply by radix, did overflow occur on the multiply?
+ Val *= RadixVal;
+ OverflowOccurred |= Val.udiv(RadixVal) != OldVal;
+
+ // Add value, did overflow occur on the value?
+ // (a + b) ult b <=> overflow
+ Val += CharVal;
+ OverflowOccurred |= Val.ult(CharVal);
+ }
+ return OverflowOccurred;
+}
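
The slow path above detects overflow one digit at a time: after the multiply it divides back and compares against the previous value, and after the add it relies on the identity that an unsigned (a + b) is less than b exactly when the addition wrapped. The same pattern on a plain uint64_t, as a minimal sketch rather than the APInt code itself (digit values are assumed non-negative and less than the radix):

    #include <cstdint>

    // Accumulate digit values into a 64-bit result; returns true on overflow.
    static bool AccumulateDigits(const int *Digits, unsigned NumDigits,
                                 uint64_t Radix, uint64_t &Val) {
      bool Overflow = false;
      Val = 0;
      for (unsigned i = 0; i != NumDigits; ++i) {
        uint64_t Old = Val;
        Val *= Radix;
        Overflow |= (Val / Radix) != Old;        // did the multiply wrap?
        Val += (uint64_t)Digits[i];
        Overflow |= Val < (uint64_t)Digits[i];   // (a + b) < b <=> the add wrapped
      }
      return Overflow;
    }
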
+
+llvm::APFloat NumericLiteralParser::
+GetFloatValue(const llvm::fltSemantics &Format, bool* isExact) {
+ using llvm::APFloat;
+
+ llvm::SmallVector<char,256> floatChars;
+ for (unsigned i = 0, n = ThisTokEnd-ThisTokBegin; i != n; ++i)
+ floatChars.push_back(ThisTokBegin[i]);
+
+ floatChars.push_back('\0');
+
+ APFloat V (Format, APFloat::fcZero, false);
+ APFloat::opStatus status;
+
+ status = V.convertFromString(&floatChars[0],APFloat::rmNearestTiesToEven);
+
+ if (isExact)
+ *isExact = status == APFloat::opOK;
+
+ return V;
+}
+
+
+CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
+ SourceLocation Loc, Preprocessor &PP) {
+ // At this point we know that the character matches the regex "L?'.*'".
+ HadError = false;
+
+ // Determine if this is a wide character.
+ IsWide = begin[0] == 'L';
+ if (IsWide) ++begin;
+
+ // Skip over the entry quote.
+ assert(begin[0] == '\'' && "Invalid token lexed");
+ ++begin;
+
+ // FIXME: The "Value" is a uint64_t so we can handle char literals of
+ // up to 64 bits.
+ // FIXME: This extensively assumes that 'char' is 8 bits.
+ assert(PP.getTargetInfo().getCharWidth() == 8 &&
+ "Assumes char is 8 bits");
+ assert(PP.getTargetInfo().getIntWidth() <= 64 &&
+ (PP.getTargetInfo().getIntWidth() & 7) == 0 &&
+ "Assumes sizeof(int) on target is <= 64 and a multiple of char");
+ assert(PP.getTargetInfo().getWCharWidth() <= 64 &&
+ "Assumes sizeof(wchar) on target is <= 64");
+
+ // This is what we will use for overflow detection
+ llvm::APInt LitVal(PP.getTargetInfo().getIntWidth(), 0);
+
+ unsigned NumCharsSoFar = 0;
+ while (begin[0] != '\'') {
+ uint64_t ResultChar;
+ if (begin[0] != '\\') // If this is a normal character, consume it.
+ ResultChar = *begin++;
+ else // Otherwise, this is an escape character.
+ ResultChar = ProcessCharEscape(begin, end, HadError, Loc, IsWide, PP);
+
+ // If this is a multi-character constant (e.g. 'abc'), handle it. These are
+ // implementation defined (C99 6.4.4.4p10).
+ if (NumCharsSoFar) {
+ if (IsWide) {
+ // Emulate GCC's (unintentional?) behavior: L'ab' -> L'b'.
+ LitVal = 0;
+ } else {
+ // Narrow character literals act as though their value is concatenated
+ // in this implementation, but warn on overflow.
+ if (LitVal.countLeadingZeros() < 8)
+ PP.Diag(Loc, diag::warn_char_constant_too_large);
+ LitVal <<= 8;
+ }
+ }
+
+ LitVal = LitVal + ResultChar;
+ ++NumCharsSoFar;
+ }
+
+ // If this is the second character being processed, do special handling.
+ if (NumCharsSoFar > 1) {
+ // Warn about discarding the top bits for multi-char wide-character
+ // constants (L'abcd').
+ if (IsWide)
+ PP.Diag(Loc, diag::warn_extraneous_wide_char_constant);
+ else if (NumCharsSoFar != 4)
+ PP.Diag(Loc, diag::ext_multichar_character_literal);
+ else
+ PP.Diag(Loc, diag::ext_four_char_character_literal);
+ IsMultiChar = true;
+ }
+
+ // Transfer the value from APInt to uint64_t
+ Value = LitVal.getZExtValue();
+
+ // If this is a single narrow character, sign extend it (e.g. '\xFF' is "-1")
+ // if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
+ // character constants are not sign extended in this implementation:
+ // '\xFF\xFF' = 65535 and '\x0\xFF' = 255, which matches GCC.
+ if (!IsWide && NumCharsSoFar == 1 && (Value & 128) &&
+ PP.getTargetInfo().isCharSigned())
+ Value = (signed char)Value;
+}
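
Putting the pieces of the loop together: narrow multi-character constants are built by shifting the accumulated value left by one char width and adding the next byte, and a lone narrow character is sign-extended afterwards when char is signed on the target. A tiny model of that value computation (my own sketch, with the same 8-bit-char assumption the asserts above make):

    #include <cstdint>

    // Compute the value of a narrow character constant such as 'a' or 'abcd'.
    static uint64_t CharConstantValue(const char *Chars, unsigned NumChars,
                                      bool CharIsSigned) {
      uint64_t Val = 0;
      for (unsigned i = 0; i != NumChars; ++i)
        Val = (Val << 8) | (unsigned char)Chars[i];   // concatenate the bytes

      // Only a single character is sign-extended; multi-char constants are not.
      if (NumChars == 1 && CharIsSigned && (Val & 0x80))
        Val = (uint64_t)(int64_t)(signed char)Val;
      return Val;
    }

For example, CharConstantValue("\xFF", 1, true) yields the 64-bit pattern of -1, while CharConstantValue("\xFF\xFF", 2, true) yields 65535, matching the comment above.
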
+
+
+/// string-literal: [C99 6.4.5]
+/// " [s-char-sequence] "
+/// L" [s-char-sequence] "
+/// s-char-sequence:
+/// s-char
+/// s-char-sequence s-char
+/// s-char:
+/// any source character except the double quote ",
+/// backslash \, or newline character
+/// escape-character
+/// universal-character-name
+/// escape-character: [C99 6.4.4.4]
+/// \ escape-code
+/// universal-character-name
+/// escape-code:
+/// character-escape-code
+/// octal-escape-code
+/// hex-escape-code
+/// character-escape-code: one of
+/// n t b r f v a
+/// \ ' " ?
+/// octal-escape-code:
+/// octal-digit
+/// octal-digit octal-digit
+/// octal-digit octal-digit octal-digit
+/// hex-escape-code:
+/// x hex-digit
+/// hex-escape-code hex-digit
+/// universal-character-name:
+/// \u hex-quad
+/// \U hex-quad hex-quad
+/// hex-quad:
+/// hex-digit hex-digit hex-digit hex-digit
+///
+StringLiteralParser::
+StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
+ Preprocessor &pp) : PP(pp) {
+ // Scan all of the string portions, remember the max individual token length,
+ // computing a bound on the concatenated string length, and see whether any
+ // piece is a wide-string. If any of the string portions is a wide-string
+ // literal, the result is a wide-string literal [C99 6.4.5p4].
+ MaxTokenLength = StringToks[0].getLength();
+ SizeBound = StringToks[0].getLength()-2; // -2 for "".
+ AnyWide = StringToks[0].is(tok::wide_string_literal);
+
+ hadError = false;
+
+ // Implement Translation Phase #6: concatenation of string literals
+ // (C99 5.1.1.2p1). The common case is only one string fragment.
+ for (unsigned i = 1; i != NumStringToks; ++i) {
+ // The string could be shorter than this if it needs cleaning, but this is a
+ // reasonable bound, which is all we need.
+ SizeBound += StringToks[i].getLength()-2; // -2 for "".
+
+ // Remember maximum string piece length.
+ if (StringToks[i].getLength() > MaxTokenLength)
+ MaxTokenLength = StringToks[i].getLength();
+
+ // Remember if we see any wide strings.
+ AnyWide |= StringToks[i].is(tok::wide_string_literal);
+ }
+
+ // Include space for the null terminator.
+ ++SizeBound;
+
+ // TODO: K&R warning: "traditional C rejects string constant concatenation"
+
+ // Get the width in bytes of wchar_t. If no wchar_t strings are used, do not
+ // query the target. As such, wchar_tByteWidth is only valid if AnyWide=true.
+ wchar_tByteWidth = ~0U;
+ if (AnyWide) {
+ wchar_tByteWidth = PP.getTargetInfo().getWCharWidth();
+ assert((wchar_tByteWidth & 7) == 0 && "Assumes wchar_t is byte multiple!");
+ wchar_tByteWidth /= 8;
+ }
+
+ // The output buffer size needs to be large enough to hold wide characters.
+ // This is a worst-case assumption which basically corresponds to L"" "long".
+ if (AnyWide)
+ SizeBound *= wchar_tByteWidth;
+
+ // Size the temporary buffer to hold the result string data.
+ ResultBuf.resize(SizeBound);
+
+ // Likewise, but for each string piece.
+ llvm::SmallString<512> TokenBuf;
+ TokenBuf.resize(MaxTokenLength);
+
+ // Loop over all the strings, getting their spelling, and expanding them to
+ // wide strings as appropriate.
+ ResultPtr = &ResultBuf[0]; // Next byte to fill in.
+
+ Pascal = false;
+
+ for (unsigned i = 0, e = NumStringToks; i != e; ++i) {
+ const char *ThisTokBuf = &TokenBuf[0];
+ // Get the spelling of the token, which eliminates trigraphs, etc. We know
+ // that ThisTokBuf points to a buffer that is big enough for the whole token
+ // and 'spelled' tokens can only shrink.
+ unsigned ThisTokLen = PP.getSpelling(StringToks[i], ThisTokBuf);
+ const char *ThisTokEnd = ThisTokBuf+ThisTokLen-1; // Skip end quote.
+
+ // TODO: Input character set mapping support.
+
+ // Skip L marker for wide strings.
+ bool ThisIsWide = false;
+ if (ThisTokBuf[0] == 'L') {
+ ++ThisTokBuf;
+ ThisIsWide = true;
+ }
+
+ assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
+ ++ThisTokBuf;
+
+ // Check if this is a pascal string
+ if (pp.getLangOptions().PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
+ ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
+
+ // If the \p sequence is found in the first token, we have a pascal string
+ // Otherwise, if we already have a pascal string, ignore the first \p
+ if (i == 0) {
+ ++ThisTokBuf;
+ Pascal = true;
+ } else if (Pascal)
+ ThisTokBuf += 2;
+ }
+
+ while (ThisTokBuf != ThisTokEnd) {
+ // Is this a span of non-escape characters?
+ if (ThisTokBuf[0] != '\\') {
+ const char *InStart = ThisTokBuf;
+ do {
+ ++ThisTokBuf;
+ } while (ThisTokBuf != ThisTokEnd && ThisTokBuf[0] != '\\');
+
+ // Copy the character span over.
+ unsigned Len = ThisTokBuf-InStart;
+ if (!AnyWide) {
+ memcpy(ResultPtr, InStart, Len);
+ ResultPtr += Len;
+ } else {
+ // Note: our internal rep of wide char tokens is always little-endian.
+ for (; Len; --Len, ++InStart) {
+ *ResultPtr++ = InStart[0];
+ // Add zeros at the end.
+ for (unsigned i = 1, e = wchar_tByteWidth; i != e; ++i)
+ *ResultPtr++ = 0;
+ }
+ }
+ continue;
+ }
+ // Is this a Universal Character Name escape?
+ if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
+ ProcessUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr,
+ hadError, StringToks[i].getLocation(), ThisIsWide, PP);
+ continue;
+ }
+ // Otherwise, this is a non-UCN escape character. Process it.
+ unsigned ResultChar = ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
+ StringToks[i].getLocation(),
+ ThisIsWide, PP);
+
+ // Note: our internal rep of wide char tokens is always little-endian.
+ *ResultPtr++ = ResultChar & 0xFF;
+
+ if (AnyWide) {
+ for (unsigned i = 1, e = wchar_tByteWidth; i != e; ++i)
+ *ResultPtr++ = ResultChar >> i*8;
+ }
+ }
+ }
+
+ if (Pascal) {
+ ResultBuf[0] = ResultPtr-&ResultBuf[0]-1;
+
+ // Verify that pascal strings aren't too large.
+ if (GetStringLength() > 256) {
+ PP.Diag(StringToks[0].getLocation(), diag::err_pascal_string_too_long)
+ << SourceRange(StringToks[0].getLocation(),
+ StringToks[NumStringToks-1].getLocation());
+ hadError = 1;
+ return;
+ }
+ }
+}
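
The buffer sizing in the constructor above is deliberately pessimistic: every token contributes its length minus the two quote characters, one byte is reserved for a terminator, and if any piece is wide the whole bound is scaled by the byte width of wchar_t (the worst case being something like L"" "long"). A small sketch of just that bound, with PieceLengths standing in for the token lengths (an illustration, not the parser's interface):

    #include <cstddef>
    #include <vector>

    // Conservative upper bound on the byte size of the concatenated string data.
    static std::size_t StringSizeBound(const std::vector<std::size_t> &PieceLengths,
                                       bool AnyWide, unsigned WCharByteWidth) {
      std::size_t Bound = 0;
      for (std::size_t Len : PieceLengths)
        Bound += Len - 2;           // drop the opening and closing quote
      ++Bound;                      // room for a null terminator
      if (AnyWide)
        Bound *= WCharByteWidth;    // every byte may expand to a wide character
      return Bound;
    }
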
+
+
+/// getOffsetOfStringByte - This function returns the offset of the
+/// specified byte of the string data represented by Token. This handles
+/// advancing over escape sequences in the string.
+unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
+ unsigned ByteNo,
+ Preprocessor &PP) {
+ // Get the spelling of the token.
+ llvm::SmallString<16> SpellingBuffer;
+ SpellingBuffer.resize(Tok.getLength());
+
+ const char *SpellingPtr = &SpellingBuffer[0];
+ unsigned TokLen = PP.getSpelling(Tok, SpellingPtr);
+
+ assert(SpellingPtr[0] != 'L' && "Doesn't handle wide strings yet");
+
+
+ const char *SpellingStart = SpellingPtr;
+ const char *SpellingEnd = SpellingPtr+TokLen;
+
+ // Skip over the leading quote.
+ assert(SpellingPtr[0] == '"' && "Should be a string literal!");
+ ++SpellingPtr;
+
+ // Skip over bytes until we find the offset we're looking for.
+ while (ByteNo) {
+ assert(SpellingPtr < SpellingEnd && "Didn't find byte offset!");
+
+ // Step over non-escapes simply.
+ if (*SpellingPtr != '\\') {
+ ++SpellingPtr;
+ --ByteNo;
+ continue;
+ }
+
+ // Otherwise, this is an escape character. Advance over it.
+ bool HadError = false;
+ ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
+ Tok.getLocation(), false, PP);
+ assert(!HadError && "This method isn't valid on erroneous strings");
+ --ByteNo;
+ }
+
+ return SpellingPtr-SpellingStart;
+}
diff --git a/lib/Lex/MacroArgs.cpp b/lib/Lex/MacroArgs.cpp
new file mode 100644
index 0000000..cba69b7
--- /dev/null
+++ b/lib/Lex/MacroArgs.cpp
@@ -0,0 +1,240 @@
+//===--- MacroArgs.cpp - Formal argument info for Macros ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroArgs interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+using namespace clang;
+
+/// MacroArgs ctor function - Create a new MacroArgs object with the specified
+/// macro and argument info.
+MacroArgs *MacroArgs::create(const MacroInfo *MI,
+ const Token *UnexpArgTokens,
+ unsigned NumToks, bool VarargsElided) {
+ assert(MI->isFunctionLike() &&
+ "Can't have args for an object-like macro!");
+
+ // Allocate memory for the MacroArgs object with the lexer tokens at the end.
+ MacroArgs *Result = (MacroArgs*)malloc(sizeof(MacroArgs) +
+ NumToks*sizeof(Token));
+ // Construct the macroargs object.
+ new (Result) MacroArgs(NumToks, VarargsElided);
+
+ // Copy the actual unexpanded tokens to immediately after the result ptr.
+ if (NumToks)
+ memcpy(const_cast<Token*>(Result->getUnexpArgument(0)),
+ UnexpArgTokens, NumToks*sizeof(Token));
+
+ return Result;
+}
+
+/// destroy - Destroy and deallocate the memory for this object.
+///
+void MacroArgs::destroy() {
+ // Run the dtor to deallocate the vectors.
+ this->~MacroArgs();
+ // Release the memory for the object.
+ free(this);
+}
+
+
+/// getArgLength - Given a pointer to an expanded or unexpanded argument,
+/// return the number of tokens, not counting the EOF, that make up the
+/// argument.
+unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
+ unsigned NumArgTokens = 0;
+ for (; ArgPtr->isNot(tok::eof); ++ArgPtr)
+ ++NumArgTokens;
+ return NumArgTokens;
+}
+
+
+/// getUnexpArgument - Return the unexpanded tokens for the specified formal.
+///
+const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
+ // The unexpanded argument tokens start immediately after the MacroArgs object
+ // in memory.
+ const Token *Start = (const Token *)(this+1);
+ const Token *Result = Start;
+ // Scan to find Arg.
+ for (; Arg; ++Result) {
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ if (Result->is(tok::eof))
+ --Arg;
+ }
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ return Result;
+}
+
+
+/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
+/// by pre-expansion, return false. Otherwise, conservatively return true.
+bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok,
+ Preprocessor &PP) const {
+ // If there are no identifiers in the argument list, or if the identifiers are
+ // known to not be macros, pre-expansion won't modify it.
+ for (; ArgTok->isNot(tok::eof); ++ArgTok)
+ if (IdentifierInfo *II = ArgTok->getIdentifierInfo()) {
+ if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled())
+ // Return true even though the macro could be a function-like macro
+ // without a following '(' token.
+ return true;
+ }
+ return false;
+}
+
+/// getPreExpArgument - Return the pre-expanded form of the specified
+/// argument.
+const std::vector<Token> &
+MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
+ assert(Arg < NumUnexpArgTokens && "Invalid argument number!");
+
+ // If we have already computed this, return it.
+ if (PreExpArgTokens.empty())
+ PreExpArgTokens.resize(NumUnexpArgTokens);
+
+ std::vector<Token> &Result = PreExpArgTokens[Arg];
+ if (!Result.empty()) return Result;
+
+ const Token *AT = getUnexpArgument(Arg);
+ unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
+
+ // Otherwise, we have to pre-expand this argument, populating Result. To do
+ // this, we set up a fake TokenLexer to lex from the unexpanded argument
+ // list. With this installed, we lex expanded tokens until we hit the EOF
+ // token at the end of the unexp list.
+ PP.EnterTokenStream(AT, NumToks, false /*disable expand*/,
+ false /*owns tokens*/);
+
+ // Lex all of the macro-expanded tokens into Result.
+ do {
+ Result.push_back(Token());
+ Token &Tok = Result.back();
+ PP.Lex(Tok);
+ } while (Result.back().isNot(tok::eof));
+
+ // Pop the token stream off the top of the stack. We know that the internal
+ // pointer inside of it is to the "end" of the token stream, but the stack
+ // will not otherwise be popped until the next token is lexed. The problem is
+ // that the token may be lexed sometime after the vector of tokens itself is
+ // destroyed, which would be badness.
+ PP.RemoveTopOfLexerStack();
+ return Result;
+}
+
+
+/// StringifyArgument - Implement C99 6.10.3.2p2, converting a sequence of
+/// tokens into the literal string token that should be produced by the C #
+/// preprocessor operator. If Charify is true, then it should be turned into
+/// a character literal for the Microsoft charize (#@) extension.
+///
+Token MacroArgs::StringifyArgument(const Token *ArgToks,
+ Preprocessor &PP, bool Charify) {
+ Token Tok;
+ Tok.startToken();
+ Tok.setKind(tok::string_literal);
+
+ const Token *ArgTokStart = ArgToks;
+
+ // Stringify all the tokens.
+ llvm::SmallString<128> Result;
+ Result += "\"";
+
+ bool isFirst = true;
+ for (; ArgToks->isNot(tok::eof); ++ArgToks) {
+ const Token &Tok = *ArgToks;
+ if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
+ Result += ' ';
+ isFirst = false;
+
+ // If this is a string or character constant, escape the token as specified
+ // by 6.10.3.2p2.
+ if (Tok.is(tok::string_literal) || // "foo"
+ Tok.is(tok::wide_string_literal) || // L"foo"
+ Tok.is(tok::char_constant)) { // 'x' and L'x'.
+ std::string Str = Lexer::Stringify(PP.getSpelling(Tok));
+ Result.append(Str.begin(), Str.end());
+ } else {
+ // Otherwise, just append the token. Do some gymnastics to get the token
+ // in place and avoid copies where possible.
+ unsigned CurStrLen = Result.size();
+ Result.resize(CurStrLen+Tok.getLength());
+ const char *BufPtr = &Result[CurStrLen];
+ unsigned ActualTokLen = PP.getSpelling(Tok, BufPtr);
+
+ // If getSpelling returned a pointer to an already uniqued version of the
+ // string instead of filling in BufPtr, memcpy it onto our string.
+ if (BufPtr != &Result[CurStrLen])
+ memcpy(&Result[CurStrLen], BufPtr, ActualTokLen);
+
+ // If the token was dirty, the spelling may be shorter than the token.
+ if (ActualTokLen != Tok.getLength())
+ Result.resize(CurStrLen+ActualTokLen);
+ }
+ }
+
+ // If the last character of the string is a \, and if it isn't escaped, this
+ // is an invalid string literal, diagnose it as specified in C99.
+ if (Result.back() == '\\') {
+ // Count the number of consecutive \ characters. If even, then they are
+ // just escaped backslashes, otherwise it's an error.
+ unsigned FirstNonSlash = Result.size()-2;
+ // Guaranteed to find the starting " if nothing else.
+ while (Result[FirstNonSlash] == '\\')
+ --FirstNonSlash;
+ if ((Result.size()-1-FirstNonSlash) & 1) {
+ // Diagnose errors for things like: #define F(X) #X / F(\)
+ PP.Diag(ArgToks[-1], diag::pp_invalid_string_literal);
+ Result.pop_back(); // remove one of the \'s.
+ }
+ }
+ Result += '"';
+
+ // If this is the charify operation and the result is not a legal character
+ // constant, diagnose it.
+ if (Charify) {
+ // First step, turn double quotes into single quotes:
+ Result[0] = '\'';
+ Result[Result.size()-1] = '\'';
+
+ // Check for bogus character.
+ bool isBad = false;
+ if (Result.size() == 3)
+ isBad = Result[1] == '\''; // ''' is not legal. '\' already fixed above.
+ else
+ isBad = (Result.size() != 4 || Result[1] != '\\'); // Not '\x'
+
+ if (isBad) {
+ PP.Diag(ArgTokStart[0], diag::err_invalid_character_to_charify);
+ Result = "' '"; // Use something arbitrary, but legal.
+ }
+ }
+
+ PP.CreateString(&Result[0], Result.size(), Tok);
+ return Tok;
+}
+
+/// getStringifiedArgument - Compute, cache, and return the specified argument
+/// that has been 'stringified' as required by the # operator.
+const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
+ Preprocessor &PP) {
+ assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
+ if (StringifiedArgs.empty()) {
+ StringifiedArgs.resize(getNumArguments());
+ memset(&StringifiedArgs[0], 0,
+ sizeof(StringifiedArgs[0])*getNumArguments());
+ }
+ if (StringifiedArgs[ArgNo].isNot(tok::string_literal))
+ StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP);
+ return StringifiedArgs[ArgNo];
+}
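
create() above uses a single allocation in which the unexpanded Token array lives immediately after the MacroArgs object: one malloc sized for both, placement new for the header, a memcpy for the tokens, and a matching destroy() that runs the destructor and frees the block. The general pattern, shown on a hypothetical Header/Item pair rather than Clang's types (it relies on Item being trivially copyable, as Token is treated here):

    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct Item { int Value; };

    // Header object with a trailing array of Items in the same allocation.
    struct Header {
      unsigned NumItems;

      static Header *create(const Item *Items, unsigned N) {
        void *Mem = std::malloc(sizeof(Header) + N * sizeof(Item));
        Header *H = new (Mem) Header();                 // placement-construct the header
        H->NumItems = N;
        if (N)
          std::memcpy(H + 1, Items, N * sizeof(Item));  // items start right after it
        return H;
      }

      const Item *items() const { return reinterpret_cast<const Item *>(this + 1); }

      void destroy() {
        this->~Header();
        std::free(this);
      }
    };
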
diff --git a/lib/Lex/MacroArgs.h b/lib/Lex/MacroArgs.h
new file mode 100644
index 0000000..4b22fa1
--- /dev/null
+++ b/lib/Lex/MacroArgs.h
@@ -0,0 +1,109 @@
+//===--- MacroArgs.h - Formal argument info for Macros ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MacroArgs interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_MACROARGS_H
+#define LLVM_CLANG_MACROARGS_H
+
+#include <vector>
+
+namespace clang {
+ class MacroInfo;
+ class Preprocessor;
+ class Token;
+
+/// MacroArgs - An instance of this class captures information about
+/// the formal arguments specified to a function-like macro invocation.
+class MacroArgs {
+ /// NumUnexpArgTokens - The number of raw, unexpanded tokens for the
+ /// arguments. All of the actual argument tokens are allocated immediately
+ /// after the MacroArgs object in memory. This is all of the arguments
+ /// concatenated together, with 'EOF' markers at the end of each argument.
+ unsigned NumUnexpArgTokens;
+
+ /// PreExpArgTokens - Pre-expanded tokens for arguments that need them. Empty
+ /// if not yet computed. This includes the EOF marker at the end of the
+ /// stream.
+ std::vector<std::vector<Token> > PreExpArgTokens;
+
+ /// StringifiedArgs - This contains arguments in 'stringified' form. If the
+ /// stringified form of an argument has not yet been computed, this is empty.
+ std::vector<Token> StringifiedArgs;
+
+ /// VarargsElided - True if this is a C99 style varargs macro invocation and
+ /// there was no argument specified for the "..." argument. If the argument
+ /// was specified (even empty) or this isn't a C99 style varargs function, or
+ /// if in strict mode and the C99 varargs macro had only a ... argument, this
+ /// is false.
+ bool VarargsElided;
+
+ MacroArgs(unsigned NumToks, bool varargsElided)
+ : NumUnexpArgTokens(NumToks), VarargsElided(varargsElided) {}
+ ~MacroArgs() {}
+public:
+ /// MacroArgs ctor function - Create a new MacroArgs object with the specified
+ /// macro and argument info.
+ static MacroArgs *create(const MacroInfo *MI,
+ const Token *UnexpArgTokens,
+ unsigned NumArgTokens, bool VarargsElided);
+
+ /// destroy - Destroy and deallocate the memory for this object.
+ ///
+ void destroy();
+
+ /// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
+ /// by pre-expansion, return false. Otherwise, conservatively return true.
+ bool ArgNeedsPreexpansion(const Token *ArgTok, Preprocessor &PP) const;
+
+ /// getUnexpArgument - Return a pointer to the first token of the unexpanded
+ /// token list for the specified formal.
+ ///
+ const Token *getUnexpArgument(unsigned Arg) const;
+
+ /// getArgLength - Given a pointer to an expanded or unexpanded argument,
+ /// return the number of tokens, not counting the EOF, that make up the
+ /// argument.
+ static unsigned getArgLength(const Token *ArgPtr);
+
+ /// getPreExpArgument - Return the pre-expanded form of the specified
+ /// argument.
+ const std::vector<Token> &
+ getPreExpArgument(unsigned Arg, Preprocessor &PP);
+
+ /// getStringifiedArgument - Compute, cache, and return the specified argument
+ /// that has been 'stringified' as required by the # operator.
+ const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
+
+ /// getNumArguments - Return the number of arguments passed into this macro
+ /// invocation.
+ unsigned getNumArguments() const { return NumUnexpArgTokens; }
+
+
+ /// isVarargsElidedUse - Return true if this is a C99 style varargs macro
+ /// invocation and there was no argument specified for the "..." argument. If
+ /// the argument was specified (even empty) or this isn't a C99 style varargs
+ /// function, or if in strict mode and the C99 varargs macro had only a ...
+ /// argument, this returns false.
+ bool isVarargsElidedUse() const { return VarargsElided; }
+
+ /// StringifyArgument - Implement C99 6.10.3.2p2, converting a sequence of
+ /// tokens into the literal string token that should be produced by the C #
+ /// preprocessor operator. If Charify is true, then it should be turned into
+ /// a character literal for the Microsoft charize (#@) extension.
+ ///
+ static Token StringifyArgument(const Token *ArgToks,
+ Preprocessor &PP, bool Charify = false);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
new file mode 100644
index 0000000..df89450
--- /dev/null
+++ b/lib/Lex/MacroInfo.cpp
@@ -0,0 +1,75 @@
+//===--- MacroInfo.cpp - Information about #defined identifiers -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
+ IsFunctionLike = false;
+ IsC99Varargs = false;
+ IsGNUVarargs = false;
+ IsBuiltinMacro = false;
+ IsDisabled = false;
+ IsUsed = true;
+
+ ArgumentList = 0;
+ NumArguments = 0;
+}
+
+/// isIdenticalTo - Return true if the specified macro definition is equal to
+/// this macro in spelling, arguments, and whitespace. This is used to emit
+/// duplicate definition warnings. This implements the rules in C99 6.10.3.
+///
+bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP) const {
+ // Check # tokens in replacement, number of args, and various flags all match.
+ if (ReplacementTokens.size() != Other.ReplacementTokens.size() ||
+ getNumArgs() != Other.getNumArgs() ||
+ isFunctionLike() != Other.isFunctionLike() ||
+ isC99Varargs() != Other.isC99Varargs() ||
+ isGNUVarargs() != Other.isGNUVarargs())
+ return false;
+
+ // Check arguments.
+ for (arg_iterator I = arg_begin(), OI = Other.arg_begin(), E = arg_end();
+ I != E; ++I, ++OI)
+ if (*I != *OI) return false;
+
+ // Check all the tokens.
+ for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
+ const Token &A = ReplacementTokens[i];
+ const Token &B = Other.ReplacementTokens[i];
+ if (A.getKind() != B.getKind())
+ return false;
+
+ // If this isn't the first token, check that the whitespace and
+ // start-of-line characteristics match.
+ if (i != 0 &&
+ (A.isAtStartOfLine() != B.isAtStartOfLine() ||
+ A.hasLeadingSpace() != B.hasLeadingSpace()))
+ return false;
+
+ // If this is an identifier, it is easy.
+ if (A.getIdentifierInfo() || B.getIdentifierInfo()) {
+ if (A.getIdentifierInfo() != B.getIdentifierInfo())
+ return false;
+ continue;
+ }
+
+ // Otherwise, check the spelling.
+ if (PP.getSpelling(A) != PP.getSpelling(B))
+ return false;
+ }
+
+ return true;
+}
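
Reduced to its essentials, the C99 6.10.3 identity check above asks three things of each pair of replacement tokens: same kind, same spelling, and (except for the first token) the same whitespace placement. A toy per-token predicate capturing that, on a made-up token struct rather than clang::Token:

    #include <string>

    // Minimal stand-in for a replacement-list token.
    struct ToyTok {
      std::string Spelling;
      bool HasLeadingSpace;
      bool AtStartOfLine;
    };

    static bool TokensIdentical(const ToyTok &A, const ToyTok &B, bool IsFirst) {
      if (A.Spelling != B.Spelling)
        return false;
      // Whitespace before the very first replacement token is irrelevant.
      if (!IsFirst && (A.HasLeadingSpace != B.HasLeadingSpace ||
                       A.AtStartOfLine != B.AtStartOfLine))
        return false;
      return true;
    }
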
diff --git a/lib/Lex/Makefile b/lib/Lex/Makefile
new file mode 100644
index 0000000..a2437da
--- /dev/null
+++ b/lib/Lex/Makefile
@@ -0,0 +1,28 @@
+##===- clang/lib/Lex/Makefile ------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the Lexer library for the C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+include $(LEVEL)/Makefile.config
+
+LIBRARYNAME := clangLex
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+ifeq ($(ARCH),PowerPC)
+CXXFLAGS += -maltivec
+endif
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Lex/PPCaching.cpp b/lib/Lex/PPCaching.cpp
new file mode 100644
index 0000000..53aa09c
--- /dev/null
+++ b/lib/Lex/PPCaching.cpp
@@ -0,0 +1,113 @@
+//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// caching of lexed tokens.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+/// EnableBacktrackAtThisPos - From the point that this method is called, and
+/// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
+/// keeps track of the lexed tokens so that a subsequent Backtrack() call will
+/// make the Preprocessor re-lex the same tokens.
+///
+/// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
+/// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
+/// be combined with the EnableBacktrackAtThisPos calls in reverse order.
+void Preprocessor::EnableBacktrackAtThisPos() {
+ BacktrackPositions.push_back(CachedLexPos);
+ EnterCachingLexMode();
+}
+
+/// CommitBacktrackedTokens - Disable the last EnableBacktrackAtThisPos call.
+void Preprocessor::CommitBacktrackedTokens() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ BacktrackPositions.pop_back();
+}
+
+/// Backtrack - Make Preprocessor re-lex the tokens that were lexed since
+/// EnableBacktrackAtThisPos() was previously called.
+void Preprocessor::Backtrack() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ CachedLexPos = BacktrackPositions.back();
+ BacktrackPositions.pop_back();
+}
+
+void Preprocessor::CachingLex(Token &Result) {
+ if (CachedLexPos < CachedTokens.size()) {
+ Result = CachedTokens[CachedLexPos++];
+ return;
+ }
+
+ ExitCachingLexMode();
+ Lex(Result);
+
+ if (!isBacktrackEnabled()) {
+ // All cached tokens were consumed.
+ CachedTokens.clear();
+ CachedLexPos = 0;
+ return;
+ }
+
+ // We should cache the lexed token.
+
+ EnterCachingLexMode();
+ if (Result.isNot(tok::eof)) {
+ CachedTokens.push_back(Result);
+ ++CachedLexPos;
+ }
+}
+
+void Preprocessor::EnterCachingLexMode() {
+ if (InCachingLexMode())
+ return;
+
+ PushIncludeMacroStack();
+}
+
+
+const Token &Preprocessor::PeekAhead(unsigned N) {
+ assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
+ ExitCachingLexMode();
+ for (unsigned C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
+ CachedTokens.push_back(Token());
+ Lex(CachedTokens.back());
+ }
+ EnterCachingLexMode();
+ return CachedTokens.back();
+}
+
+void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotation() && "Expected annotation token");
+ assert(CachedLexPos != 0 && "Expected to have some cached tokens");
+ assert(CachedTokens[CachedLexPos-1].getLocation() == Tok.getAnnotationEndLoc()
+ && "The annotation should be until the most recent cached token");
+
+ // Start from the end of the cached tokens list and look for the token
+ // that is the beginning of the annotation token.
+ for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
+ CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
+ if (AnnotBegin->getLocation() == Tok.getLocation()) {
+ assert((BacktrackPositions.empty() || BacktrackPositions.back() < i) &&
+ "The backtrack pos points inside the annotated tokens!");
+ // Replace the cached tokens with the single annotation token.
+ CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
+ *AnnotBegin = Tok;
+ CachedLexPos = i;
+ return;
+ }
+ }
+
+ assert(0&&"Didn't find the first token represented by the annotation token!");
+}
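
The caching machinery above amounts to a vector of already-lexed tokens plus a stack of saved positions: EnableBacktrackAtThisPos pushes the current index, Backtrack rewinds to the most recent mark, and CommitBacktrackedTokens simply pops it while keeping the tokens. A self-contained toy model of that bookkeeping (names and the string-as-token simplification are mine, not the Preprocessor API):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    class TokenCache {
      std::vector<std::string> Cached;    // tokens lexed while caching is active
      std::vector<std::size_t> Marks;     // saved replay positions (a stack)
      std::size_t Pos = 0;                // next cached token to replay

    public:
      void enableBacktrack() { Marks.push_back(Pos); }

      void commit() {
        assert(!Marks.empty());
        Marks.pop_back();                 // keep the tokens, forget the mark
      }

      void backtrack() {
        assert(!Marks.empty());
        Pos = Marks.back();               // replay from the saved position
        Marks.pop_back();
      }

      // Return the next token, calling lexOne() when the cache is exhausted.
      template <typename Lexer> std::string next(Lexer lexOne) {
        if (Pos < Cached.size())
          return Cached[Pos++];
        std::string Tok = lexOne();
        if (!Marks.empty()) {             // still recording for a possible rewind
          Cached.push_back(Tok);
          ++Pos;
        } else {
          Cached.clear();                 // nothing can rewind; drop the cache
          Pos = 0;
        }
        return Tok;
      }
    };
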
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
new file mode 100644
index 0000000..af59ded
--- /dev/null
+++ b/lib/Lex/PPDirectives.cpp
@@ -0,0 +1,1665 @@
+//===--- PPDirectives.cpp - Directive Handling for Preprocessor -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements # directive processing for the Preprocessor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/APInt.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility Methods for Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
+ MacroInfo *MI;
+
+ if (!MICache.empty()) {
+ MI = MICache.back();
+ MICache.pop_back();
+ } else
+ MI = (MacroInfo*) BP.Allocate<MacroInfo>();
+ new (MI) MacroInfo(L);
+ return MI;
+}
+
+/// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
+/// be reused for allocating new MacroInfo objects.
+void Preprocessor::ReleaseMacroInfo(MacroInfo* MI) {
+ MICache.push_back(MI);
+ MI->FreeArgumentList(BP);
+}
+
+
+/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
+/// current line until the tok::eom token is found.
+void Preprocessor::DiscardUntilEndOfDirective() {
+ Token Tmp;
+ do {
+ LexUnexpandedToken(Tmp);
+ } while (Tmp.isNot(tok::eom));
+}
+
+/// ReadMacroName - Lex and validate a macro name, which occurs after a
+/// #define or #undef. This sets the token kind to eom and discards the rest
+/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
+/// this is due to a #define, 2 if due to a #undef directive, and 0 if it is
+/// something else (e.g. #ifdef).
+void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
+ // Read the token, don't allow macro expansion on it.
+ LexUnexpandedToken(MacroNameTok);
+
+ // Missing macro name?
+ if (MacroNameTok.is(tok::eom)) {
+ Diag(MacroNameTok, diag::err_pp_missing_macro_name);
+ return;
+ }
+
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ if (II == 0) {
+ std::string Spelling = getSpelling(MacroNameTok);
+ const IdentifierInfo &Info = Identifiers.get(Spelling);
+ if (Info.isCPlusPlusOperatorKeyword())
+ // C++ 2.5p2: Alternative tokens behave the same as their primary tokens
+ // except for their spellings.
+ Diag(MacroNameTok, diag::err_pp_operator_used_as_macro_name) << Spelling;
+ else
+ Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
+ // Fall through on error.
+ } else if (isDefineUndef && II->getPPKeywordID() == tok::pp_defined) {
+ // Error if defining "defined": C99 6.10.8.4.
+ Diag(MacroNameTok, diag::err_defined_macro_name);
+ } else if (isDefineUndef && II->hasMacroDefinition() &&
+ getMacroInfo(II)->isBuiltinMacro()) {
+ // Error if defining "__LINE__" and other builtins: C99 6.10.8.4.
+ if (isDefineUndef == 1)
+ Diag(MacroNameTok, diag::pp_redef_builtin_macro);
+ else
+ Diag(MacroNameTok, diag::pp_undef_builtin_macro);
+ } else {
+ // Okay, we got a good identifier node. Return it.
+ return;
+ }
+
+ // Invalid macro name, read and discard the rest of the line. Then set the
+ // token kind to tok::eom.
+ MacroNameTok.setKind(tok::eom);
+ return DiscardUntilEndOfDirective();
+}
+
+/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
+/// not, emit a diagnostic and consume up until the eom. If EnableMacros is
+/// true, then we consider macros that expand to zero tokens as being ok.
+void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
+ Token Tmp;
+ // Lex unexpanded tokens for most directives: macros might expand to zero
+ // tokens, causing us to miss diagnosing invalid lines. Some directives (like
+ // #line) allow empty macros.
+ if (EnableMacros)
+ Lex(Tmp);
+ else
+ LexUnexpandedToken(Tmp);
+
+ // There should be no tokens after the directive, but we allow them as an
+ // extension.
+ while (Tmp.is(tok::comment)) // Skip comments in -C mode.
+ LexUnexpandedToken(Tmp);
+
+ if (Tmp.isNot(tok::eom)) {
+ // Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
+ // because it is more trouble than it is worth to insert /**/ and check that
+ // there is no /**/ already in the range.
+ CodeModificationHint FixItHint;
+ if (Features.GNUMode || Features.C99 || Features.CPlusPlus)
+ FixItHint = CodeModificationHint::CreateInsertion(Tmp.getLocation(),"//");
+ Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << FixItHint;
+ DiscardUntilEndOfDirective();
+ }
+}
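+
+// Illustrative example (hypothetical input): given trailing garbage such as
+//   #endif SOME_GUARD
+// CheckEndOfDirective("endif") emits ext_pp_extra_tokens_at_eol and, in
+// GNU/C99/C++ modes, attaches a code modification hint that inserts "//"
+// before SOME_GUARD, turning the stray tokens into a comment.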
+
+
+
+/// SkipExcludedConditionalBlock - We just read a #if or related directive and
+/// decided that the subsequent tokens are in the #if'd out portion of the
+/// file. Lex the rest of the file, until we see an #endif. If
+/// FoundNonSkipPortion is true, then we have already emitted code for part of
+/// this #if directive, so #else/#elif blocks should never be entered. If
+/// FoundElse is false, then #else directives are ok; if it is true, we have
+/// already seen a #else, so another #else directive is a duplicate. When this
+/// returns, the caller can lex
+/// the first valid token.
+void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
+ bool FoundNonSkipPortion,
+ bool FoundElse) {
+ ++NumSkipped;
+ assert(CurTokenLexer == 0 && CurPPLexer && "Lexing a macro, not a file?");
+
+ CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/false,
+ FoundNonSkipPortion, FoundElse);
+
+ if (CurPTHLexer) {
+ PTHSkipExcludedConditionalBlock();
+ return;
+ }
+
+ // Enter raw mode to disable identifier lookup (and thus macro expansion),
+ // disabling warnings, etc.
+ CurPPLexer->LexingRawMode = true;
+ Token Tok;
+ while (1) {
+ if (CurLexer)
+ CurLexer->Lex(Tok);
+ else
+ CurPTHLexer->Lex(Tok);
+
+ // If this is the end of the buffer, we have an error.
+ if (Tok.is(tok::eof)) {
+ // Emit errors for each unterminated conditional on the stack, including
+ // the current one.
+ while (!CurPPLexer->ConditionalStack.empty()) {
+ Diag(CurPPLexer->ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ CurPPLexer->ConditionalStack.pop_back();
+ }
+
+ // Just return and let the caller lex after this #include.
+ break;
+ }
+
+ // If this token is not a preprocessor directive, just skip it.
+ if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
+ continue;
+
+ // We just parsed a # character at the start of a line, so we're in
+ // directive mode. Tell the lexer this so any newlines we see will be
+ // converted into an EOM token (this terminates the macro).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+ if (CurLexer) CurLexer->SetCommentRetentionState(false);
+
+
+ // Read the next token, the directive flavor.
+ LexUnexpandedToken(Tok);
+
+ // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
+ // something bogus), skip it.
+ if (Tok.isNot(tok::identifier)) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+
+ // If the first letter isn't i or e, it isn't interesting to us. We know that
+ // this is safe in the face of spelling differences, because there is no way
+ // to spell an i/e in a strange way that is another letter. Skipping this
+ // allows us to avoid looking up the identifier info for #define/#undef and
+ // other common directives.
+ const char *RawCharData = SourceMgr.getCharacterData(Tok.getLocation());
+ char FirstChar = RawCharData[0];
+ if (FirstChar >= 'a' && FirstChar <= 'z' &&
+ FirstChar != 'i' && FirstChar != 'e') {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+
+ // Get the identifier name without trigraphs or embedded newlines. Note
+ // that we can't use Tok.getIdentifierInfo() because its lookup is disabled
+ // when skipping.
+ // TODO: could do this with zero copies in the no-clean case by using
+ // strncmp below.
+ char Directive[20];
+ unsigned IdLen;
+ if (!Tok.needsCleaning() && Tok.getLength() < 20) {
+ IdLen = Tok.getLength();
+ memcpy(Directive, RawCharData, IdLen);
+ Directive[IdLen] = 0;
+ } else {
+ std::string DirectiveStr = getSpelling(Tok);
+ IdLen = DirectiveStr.size();
+ if (IdLen >= 20) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+ memcpy(Directive, &DirectiveStr[0], IdLen);
+ Directive[IdLen] = 0;
+ FirstChar = Directive[0];
+ }
+
+ if (FirstChar == 'i' && Directive[1] == 'f') {
+ if ((IdLen == 2) || // "if"
+ (IdLen == 5 && !strcmp(Directive+2, "def")) || // "ifdef"
+ (IdLen == 6 && !strcmp(Directive+2, "ndef"))) { // "ifndef"
+ // We know the entire #if/#ifdef/#ifndef block will be skipped, don't
+ // bother parsing the condition.
+ DiscardUntilEndOfDirective();
+ CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
+ /*foundnonskip*/false,
+ /*fnddelse*/false);
+ }
+ } else if (FirstChar == 'e') {
+ if (IdLen == 5 && !strcmp(Directive+1, "ndif")) { // "endif"
+ CheckEndOfDirective("endif");
+ PPConditionalInfo CondInfo;
+ CondInfo.WasSkipping = true; // Silence bogus warning.
+ bool InCond = CurPPLexer->popConditionalLevel(CondInfo);
+ InCond = InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+
+ // If we popped the outermost skipping block, we're done skipping!
+ if (!CondInfo.WasSkipping)
+ break;
+ } else if (IdLen == 4 && !strcmp(Directive+1, "lse")) { // "else".
+ // #else directive in a skipping conditional. If not in some other
+ // skipping conditional, and if #else hasn't already been seen, enter it
+ // as a non-skipping conditional.
+ DiscardUntilEndOfDirective(); // C99 6.10p4.
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
+
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the conditional is at the top level, and the #if block wasn't
+ // entered, enter the #else block now.
+ if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+ } else if (IdLen == 4 && !strcmp(Directive+1, "lif")) { // "elif".
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ bool ShouldEnter;
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition.
+ if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ DiscardUntilEndOfDirective();
+ ShouldEnter = false;
+ } else {
+ // Restore the value of LexingRawMode so that identifiers are
+ // looked up, etc, inside the #elif expression.
+ assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
+ CurPPLexer->LexingRawMode = false;
+ IdentifierInfo *IfNDefMacro = 0;
+ ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPPLexer->LexingRawMode = true;
+ }
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this condition is true, enter it!
+ if (ShouldEnter) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+ }
+ }
+
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ }
+
+ // Finally, if we are out of the conditional (saw an #endif or ran off the end
+ // of the file), just stop skipping and return to lexing whatever came after
+ // the #if block.
+ CurPPLexer->LexingRawMode = false;
+}
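+
+// A sketch of the kind of region this routine skips (illustrative input):
+//
+//   #if 0          // the caller decided this block is not entered
+//   #  if FOO      // nested: condition never evaluated, level just pushed
+//   #  endif       // pops the nested level, still skipping
+//   #elif BAR      // evaluated, because the outer #if was never entered
+//   #endif
+//
+// Skipping stops at the first #elif that evaluates to true, at a #else of the
+// outermost skipped conditional, or at its matching #endif.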
+
+void Preprocessor::PTHSkipExcludedConditionalBlock() {
+
+ while(1) {
+ assert(CurPTHLexer);
+ assert(CurPTHLexer->LexingRawMode == false);
+
+ // Skip to the next '#else', '#elif', or #endif.
+ if (CurPTHLexer->SkipBlock()) {
+ // We have reached an #endif. Both the '#' and 'endif' tokens
+ // have been consumed by the PTHLexer. Just pop off the condition level.
+ PPConditionalInfo CondInfo;
+ bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
+ InCond = InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+ break;
+ }
+
+ // We have reached a '#else' or '#elif'. Lex the next token to get
+ // the directive flavor.
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // We can actually look up the IdentifierInfo here since we aren't in
+ // raw mode.
+ tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
+
+ if (K == tok::pp_else) {
+ // #else: Enter the else condition. We aren't in a nested condition
+ // since we skip those. We're always in the one matching the last
+ // block we skipped.
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the #if block wasn't entered then enter the #else block now.
+ if (!CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+
+ // Scan until the eom token.
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ DiscardUntilEndOfDirective();
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ break;
+ }
+
+ // Otherwise skip this block.
+ continue;
+ }
+
+ assert(K == tok::pp_elif);
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition. We just skip this block.
+ if (CondInfo.FoundNonSkip)
+ continue;
+
+ // Evaluate the condition of the #elif.
+ IdentifierInfo *IfNDefMacro = 0;
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ // If this condition is true, enter it!
+ if (ShouldEnter) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+
+ // Otherwise, skip this block and go to the next one.
+ continue;
+ }
+}
+
+/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+/// return null on failure. isAngled indicates whether the file reference is
+/// for system #include's or not (i.e. using <> instead of "").
+const FileEntry *Preprocessor::LookupFile(const char *FilenameStart,
+ const char *FilenameEnd,
+ bool isAngled,
+ const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir) {
+ // If the header lookup mechanism may be relative to the current file, pass in
+ // info about where the current file is.
+ const FileEntry *CurFileEnt = 0;
+ if (!FromDir) {
+ FileID FID = getCurrentFileLexer()->getFileID();
+ CurFileEnt = SourceMgr.getFileEntryForID(FID);
+
+ // If there is no file entry associated with this file, it must be the
+ // predefines buffer. Any other file is not lexed with a normal lexer, so
+ // it won't be scanned for preprocessor directives. If we have the
+ // predefines buffer, resolve #include references (which come from the
+ // -include command line argument) as if they came from the main file, this
+ // affects file lookup etc.
+ if (CurFileEnt == 0) {
+ FID = SourceMgr.getMainFileID();
+ CurFileEnt = SourceMgr.getFileEntryForID(FID);
+ }
+ }
+
+ // Do a standard file entry lookup.
+ CurDir = CurDirLookup;
+ const FileEntry *FE =
+ HeaderInfo.LookupFile(FilenameStart, FilenameEnd,
+ isAngled, FromDir, CurDir, CurFileEnt);
+ if (FE) return FE;
+
+ // Otherwise, see if this is a subframework header. If so, this is relative
+ // to one of the headers on the #include stack. Walk the list of the current
+ // headers on the #include stack and pass them to HeaderInfo.
+ if (IsFileLexer()) {
+ if ((CurFileEnt = SourceMgr.getFileEntryForID(CurPPLexer->getFileID())))
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(FilenameStart, FilenameEnd,
+ CurFileEnt)))
+ return FE;
+ }
+
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
+ if (IsFileLexer(ISEntry)) {
+ if ((CurFileEnt =
+ SourceMgr.getFileEntryForID(ISEntry.ThePPLexer->getFileID())))
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(FilenameStart,
+ FilenameEnd, CurFileEnt)))
+ return FE;
+ }
+ }
+
+ // Otherwise, we really couldn't find the file.
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandleDirective - This callback is invoked when the lexer sees a # token
+/// at the start of a line. This consumes the directive, modifies the
+/// lexer/preprocessor state, and advances the lexer(s) so that the next token
+/// read is the correct one.
+void Preprocessor::HandleDirective(Token &Result) {
+ // FIXME: Traditional: # with whitespace before it not recognized by K&R?
+
+ // We just parsed a # character at the start of a line, so we're in directive
+ // mode. Tell the lexer this so any newlines we see will be converted into an
+ // EOM token (which terminates the directive).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+
+ ++NumDirectives;
+
+ // We are about to read a token. For the multiple-include optimization FA to
+ // work, we have to remember if we had read any tokens *before* this
+ // pp-directive.
+ bool ReadAnyTokensBeforeDirective = CurPPLexer->MIOpt.getHasReadAnyTokensVal();
+
+ // Save the '#' token in case we need to return it later.
+ Token SavedHash = Result;
+
+ // Read the next token, the directive flavor. This isn't expanded due to
+ // C99 6.10.3p8.
+ LexUnexpandedToken(Result);
+
+ // C99 6.10.3p11: Is this preprocessor directive in a macro invocation? e.g.:
+ // #define A(x) #x
+ // A(abc
+ // #warning blah
+ // def)
+ // If so, the user is relying on non-portable behavior, emit a diagnostic.
+ if (InMacroArgs)
+ Diag(Result, diag::ext_embedded_directive);
+
+TryAgain:
+ switch (Result.getKind()) {
+ case tok::eom:
+ return; // null directive.
+ case tok::comment:
+ // Handle stuff like "# /*foo*/ define X" in -E -C mode.
+ LexUnexpandedToken(Result);
+ goto TryAgain;
+
+ case tok::numeric_constant: // # 7 GNU line marker directive.
+ if (getLangOptions().AsmPreprocessor)
+ break; // # 4 is not a preprocessor directive in .S files.
+ return HandleDigitDirective(Result);
+ default:
+ IdentifierInfo *II = Result.getIdentifierInfo();
+ if (II == 0) break; // Not an identifier.
+
+ // Ask what the preprocessor keyword ID is.
+ switch (II->getPPKeywordID()) {
+ default: break;
+ // C99 6.10.1 - Conditional Inclusion.
+ case tok::pp_if:
+ return HandleIfDirective(Result, ReadAnyTokensBeforeDirective);
+ case tok::pp_ifdef:
+ return HandleIfdefDirective(Result, false, true/*not valid for miopt*/);
+ case tok::pp_ifndef:
+ return HandleIfdefDirective(Result, true, ReadAnyTokensBeforeDirective);
+ case tok::pp_elif:
+ return HandleElifDirective(Result);
+ case tok::pp_else:
+ return HandleElseDirective(Result);
+ case tok::pp_endif:
+ return HandleEndifDirective(Result);
+
+ // C99 6.10.2 - Source File Inclusion.
+ case tok::pp_include:
+ return HandleIncludeDirective(Result); // Handle #include.
+ case tok::pp___include_macros:
+ return HandleIncludeMacrosDirective(Result); // Handle -imacros.
+
+ // C99 6.10.3 - Macro Replacement.
+ case tok::pp_define:
+ return HandleDefineDirective(Result);
+ case tok::pp_undef:
+ return HandleUndefDirective(Result);
+
+ // C99 6.10.4 - Line Control.
+ case tok::pp_line:
+ return HandleLineDirective(Result);
+
+ // C99 6.10.5 - Error Directive.
+ case tok::pp_error:
+ return HandleUserDiagnosticDirective(Result, false);
+
+ // C99 6.10.6 - Pragma Directive.
+ case tok::pp_pragma:
+ return HandlePragmaDirective();
+
+ // GNU Extensions.
+ case tok::pp_import:
+ return HandleImportDirective(Result);
+ case tok::pp_include_next:
+ return HandleIncludeNextDirective(Result);
+
+ case tok::pp_warning:
+ Diag(Result, diag::ext_pp_warning_directive);
+ return HandleUserDiagnosticDirective(Result, true);
+ case tok::pp_ident:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_sccs:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_assert:
+ //isExtension = true; // FIXME: implement #assert
+ break;
+ case tok::pp_unassert:
+ //isExtension = true; // FIXME: implement #unassert
+ break;
+ }
+ break;
+ }
+
+ // If this is a .S file, treat unknown # directives as non-preprocessor
+ // directives. This is important because # may begin a comment or introduce
+ // various pseudo-ops. Just return the # token and push back the following
+ // token to be lexed next time.
+ if (getLangOptions().AsmPreprocessor) {
+ Token *Toks = new Token[2]();
+ // Return the # and the token after it.
+ Toks[0] = SavedHash;
+ Toks[1] = Result;
+ // Enter this token stream so that we re-lex the tokens. Make sure to
+ // enable macro expansion, in case the token after the # is an identifier
+ // that is expanded.
+ EnterTokenStream(Toks, 2, false, true);
+ return;
+ }
+
+ // If we reached here, the preprocessing token is not valid!
+ Diag(Result, diag::err_pp_invalid_directive);
+
+ // Read the rest of the PP line.
+ DiscardUntilEndOfDirective();
+
+ // Okay, we're done parsing the directive.
+}
+
+/// GetLineValue - Convert a numeric token into an unsigned value, emitting
+/// Diagnostic DiagID if it is invalid, and returning the value in Val.
+static bool GetLineValue(Token &DigitTok, unsigned &Val,
+ unsigned DiagID, Preprocessor &PP) {
+ if (DigitTok.isNot(tok::numeric_constant)) {
+ PP.Diag(DigitTok, DiagID);
+
+ if (DigitTok.isNot(tok::eom))
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ llvm::SmallString<64> IntegerBuffer;
+ IntegerBuffer.resize(DigitTok.getLength());
+ const char *DigitTokBegin = &IntegerBuffer[0];
+ unsigned ActualLength = PP.getSpelling(DigitTok, DigitTokBegin);
+
+ // Verify that we have a simple digit-sequence, and compute the value. This
+ // is always a simple digit string computed in decimal, so we do this manually
+ // here.
+ Val = 0;
+ for (unsigned i = 0; i != ActualLength; ++i) {
+ if (!isdigit(DigitTokBegin[i])) {
+ PP.Diag(PP.AdvanceToTokenCharacter(DigitTok.getLocation(), i),
+ diag::err_pp_line_digit_sequence);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ unsigned NextVal = Val*10+(DigitTokBegin[i]-'0');
+ if (NextVal < Val) { // overflow.
+ PP.Diag(DigitTok, DiagID);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+ Val = NextVal;
+ }
+
+ // Reject 0; this is needed by both #line numbers and flags.
+ if (Val == 0) {
+ PP.Diag(DigitTok, DiagID);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ if (DigitTokBegin[0] == '0')
+ PP.Diag(DigitTok.getLocation(), diag::warn_pp_line_decimal);
+
+ return false;
+}
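+
+// Examples of what GetLineValue accepts and rejects (illustrative):
+//   #line 42     // ok, Val == 42
+//   #line 0x2A   // rejected: not a simple decimal digit-sequence
+//   #line 0      // rejected: zero is not a valid line number or flag
+//   #line 010    // accepted as decimal 10, but warn_pp_line_decimal fires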
+
+/// HandleLineDirective - Handle #line directive: C99 6.10.4. The two
+/// acceptable forms are:
+/// # line digit-sequence
+/// # line digit-sequence "s-char-sequence"
+void Preprocessor::HandleLineDirective(Token &Tok) {
+ // Read the line # and string argument. Per C99 6.10.4p5, these tokens are
+ // expanded.
+ Token DigitTok;
+ Lex(DigitTok);
+
+ // Validate the number and convert it to an unsigned.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_line_requires_integer,*this))
+ return;
+
+ // Enforce C99 6.10.4p3: "The digit sequence shall not specify ... a
+ // number greater than 2147483647". C90 requires that the line # be <= 32767.
+ unsigned LineLimit = Features.C99 ? 2147483648U : 32768U;
+ if (LineNo >= LineLimit)
+ Diag(DigitTok, diag::ext_pp_line_too_big) << LineLimit;
+
+ int FilenameID = -1;
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the StrTok is "eom", then it wasn't present. Otherwise, it must be a
+ // string followed by eom.
+ if (StrTok.is(tok::eom))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_line_invalid_filename);
+ DiscardUntilEndOfDirective();
+ return;
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(&StrTok, 1, *this);
+ assert(!Literal.AnyWide && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString(),
+ Literal.GetStringLength());
+
+ // Verify that there is nothing after the string, other than EOM. Because
+ // of C99 6.10.4p5, macros that expand to empty tokens are ok.
+ CheckEndOfDirective("line", true);
+ }
+
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID);
+
+ if (Callbacks)
+ Callbacks->FileChanged(DigitTok.getLocation(), PPCallbacks::RenameFile,
+ SrcMgr::C_User);
+}
+
+/// ReadLineMarkerFlags - Parse and validate any flags at the end of a GNU line
+/// marker directive.
+static bool ReadLineMarkerFlags(bool &IsFileEntry, bool &IsFileExit,
+ bool &IsSystemHeader, bool &IsExternCHeader,
+ Preprocessor &PP) {
+ unsigned FlagVal;
+ Token FlagTok;
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eom)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ if (FlagVal == 1) {
+ IsFileEntry = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eom)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ } else if (FlagVal == 2) {
+ IsFileExit = true;
+
+ SourceManager &SM = PP.getSourceManager();
+ // If we are leaving the current presumed file, check to make sure the
+ // presumed include stack isn't empty!
+ FileID CurFileID =
+ SM.getDecomposedInstantiationLoc(FlagTok.getLocation()).first;
+ PresumedLoc PLoc = SM.getPresumedLoc(FlagTok.getLocation());
+
+ // If there is no include loc (main file) or if the include loc is in a
+ // different physical file, then we aren't in a "1" line marker flag region.
+ SourceLocation IncLoc = PLoc.getIncludeLoc();
+ if (IncLoc.isInvalid() ||
+ SM.getDecomposedInstantiationLoc(IncLoc).first != CurFileID) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_pop);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eom)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ }
+
+ // We must have 3 if there are still flags.
+ if (FlagVal != 3) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsSystemHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eom)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ // We must have 4 if there is yet another flag.
+ if (FlagVal != 4) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsExternCHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eom)) return false;
+
+ // There are no more valid flags here.
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+}
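+
+// Sample line markers and the flags parsed above (illustrative):
+//   # 10 "a.h" 1        // enter a.h
+//   # 20 "b.c" 2        // exit back to b.c
+//   # 30 "sys.h" 1 3    // enter a system header
+//   # 40 "sys.h" 1 3 4  // enter an extern "C" system header
+// The '3' and '4' flags may also appear without a preceding '1' or '2';
+// anything else is diagnosed with err_pp_linemarker_invalid_flag.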
+
+/// HandleDigitDirective - Handle a GNU line marker directive, whose syntax is
+/// one of the following forms:
+///
+/// # 42
+/// # 42 "file" ('1' | '2')?
+/// # 42 "file" ('1' | '2')? '3' '4'?
+///
+void Preprocessor::HandleDigitDirective(Token &DigitTok) {
+ // Validate the number and convert it to an unsigned. GNU does not have a
+ // line # limit other than that it fits in 32 bits.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_linemarker_requires_integer,
+ *this))
+ return;
+
+ Token StrTok;
+ Lex(StrTok);
+
+ bool IsFileEntry = false, IsFileExit = false;
+ bool IsSystemHeader = false, IsExternCHeader = false;
+ int FilenameID = -1;
+
+ // If the StrTok is "eom", then it wasn't present. Otherwise, it must be a
+ // string followed by eom.
+ if (StrTok.is(tok::eom))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(&StrTok, 1, *this);
+ assert(!Literal.AnyWide && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString(),
+ Literal.GetStringLength());
+
+ // If a filename was present, read any flags that are present.
+ if (ReadLineMarkerFlags(IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader, *this))
+ return;
+ }
+
+ // Create a line note with this information.
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID,
+ IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader);
+
+ // If the preprocessor has callbacks installed, notify them of the #line
+ // change. This is used so that the line marker comes out in -E mode for
+ // example.
+ if (Callbacks) {
+ PPCallbacks::FileChangeReason Reason = PPCallbacks::RenameFile;
+ if (IsFileEntry)
+ Reason = PPCallbacks::EnterFile;
+ else if (IsFileExit)
+ Reason = PPCallbacks::ExitFile;
+ SrcMgr::CharacteristicKind FileKind = SrcMgr::C_User;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+
+ Callbacks->FileChanged(DigitTok.getLocation(), Reason, FileKind);
+ }
+}
+
+
+/// HandleUserDiagnosticDirective - Handle a #warning or #error directive.
+///
+void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
+ bool isWarning) {
+ // PTH doesn't emit #warning or #error directives.
+ if (CurPTHLexer)
+ return CurPTHLexer->DiscardToEndOfLine();
+
+ // Read the rest of the line raw. We do this because we don't want macros
+ // to be expanded and we don't require that the tokens be valid preprocessing
+ // tokens. For example, this is allowed: "#warning ` 'foo". GCC does
+ // collapse multiple consecutive white space between tokens, but this isn't
+ // specified by the standard.
+ std::string Message = CurLexer->ReadToEndOfLine();
+ if (isWarning)
+ Diag(Tok, diag::pp_hash_warning) << Message;
+ else
+ Diag(Tok, diag::err_pp_hash_error) << Message;
+}
+
+/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
+///
+void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
+ // Yes, this directive is an extension.
+ Diag(Tok, diag::ext_pp_ident_directive);
+
+ // Read the string argument.
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the token kind isn't a string, it's a malformed directive.
+ if (StrTok.isNot(tok::string_literal) &&
+ StrTok.isNot(tok::wide_string_literal)) {
+ Diag(StrTok, diag::err_pp_malformed_ident);
+ if (StrTok.isNot(tok::eom))
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Verify that there is nothing after the string, other than EOM.
+ CheckEndOfDirective("ident");
+
+ if (Callbacks)
+ Callbacks->Ident(Tok.getLocation(), getSpelling(StrTok));
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Include Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
+/// checked and spelled filename, e.g. as an operand of #include. This returns
+/// true if the input filename was in <>'s or false if it was in ""'s. The
+/// caller is expected to provide a buffer that is large enough to hold the
+/// spelling of the filename, but is also expected to handle the case when
+/// this method decides to use a different buffer.
+bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
+ const char *&BufStart,
+ const char *&BufEnd) {
+ // Get the text form of the filename.
+ assert(BufStart != BufEnd && "Can't have tokens with empty spellings!");
+
+ // Make sure the filename is <x> or "x".
+ bool isAngled;
+ if (BufStart[0] == '<') {
+ if (BufEnd[-1] != '>') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ BufStart = 0;
+ return true;
+ }
+ isAngled = true;
+ } else if (BufStart[0] == '"') {
+ if (BufEnd[-1] != '"') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ BufStart = 0;
+ return true;
+ }
+ isAngled = false;
+ } else {
+ Diag(Loc, diag::err_pp_expects_filename);
+ BufStart = 0;
+ return true;
+ }
+
+ // Diagnose #include "" as invalid.
+ if (BufEnd-BufStart <= 2) {
+ Diag(Loc, diag::err_pp_empty_filename);
+ BufStart = 0;
+ return "";
+ }
+
+ // Skip the brackets.
+ ++BufStart;
+ --BufEnd;
+ return isAngled;
+}
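+
+// Illustrative inputs for GetIncludeFilenameSpelling:
+//   <stdio.h>  -> returns true,  BufStart/BufEnd narrowed to stdio.h
+//   "foo.h"    -> returns false, BufStart/BufEnd narrowed to foo.h
+//   ""         -> err_pp_empty_filename, BufStart set to null
+//   <foo.h     -> err_pp_expects_filename (missing closing '>')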
+
+/// ConcatenateIncludeName - Handle cases where the #include name is expanded
+/// from a macro as multiple tokens, which need to be glued together. This
+/// occurs for code like:
+/// #define FOO <a/b.h>
+/// #include FOO
+/// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+///
+/// This code concatenates and consumes tokens up to the '>' token. It returns
+/// false if the '>' was found; otherwise it returns true after finding and
+/// consuming the EOM marker.
+static bool ConcatenateIncludeName(llvm::SmallVector<char, 128> &FilenameBuffer,
+ Preprocessor &PP) {
+ Token CurTok;
+
+ PP.Lex(CurTok);
+ while (CurTok.isNot(tok::eom)) {
+ // Append the spelling of this token to the buffer. If there was a space
+ // before it, add it now.
+ if (CurTok.hasLeadingSpace())
+ FilenameBuffer.push_back(' ');
+
+ // Get the spelling of the token, directly into FilenameBuffer if possible.
+ unsigned PreAppendSize = FilenameBuffer.size();
+ FilenameBuffer.resize(PreAppendSize+CurTok.getLength());
+
+ const char *BufPtr = &FilenameBuffer[PreAppendSize];
+ unsigned ActualLen = PP.getSpelling(CurTok, BufPtr);
+
+ // If the token was spelled somewhere else, copy it into FilenameBuffer.
+ if (BufPtr != &FilenameBuffer[PreAppendSize])
+ memcpy(&FilenameBuffer[PreAppendSize], BufPtr, ActualLen);
+
+ // Resize FilenameBuffer to the correct size.
+ if (CurTok.getLength() != ActualLen)
+ FilenameBuffer.resize(PreAppendSize+ActualLen);
+
+ // If we found the '>' marker, return success.
+ if (CurTok.is(tok::greater))
+ return false;
+
+ PP.Lex(CurTok);
+ }
+
+ // If we hit the eom marker, emit an error and return true so that the caller
+ // knows the EOM has been read.
+ PP.Diag(CurTok.getLocation(), diag::err_pp_expects_filename);
+ return true;
+}
+
+/// HandleIncludeDirective - The "#include" tokens have just been read, read the
+/// file to be included from the lexer, then include it! This is a common
+/// routine with functionality shared between #include, #include_next and
+/// #import. LookupFrom is set when this is a #include_next directive, it
+/// specifies the file to start searching from.
+void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
+ const DirectoryLookup *LookupFrom,
+ bool isImport) {
+
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // Reserve a buffer to get the spelling.
+ llvm::SmallVector<char, 128> FilenameBuffer;
+ const char *FilenameStart, *FilenameEnd;
+
+ switch (FilenameTok.getKind()) {
+ case tok::eom:
+ // If the token kind is EOM, the error has already been diagnosed.
+ return;
+
+ case tok::angle_string_literal:
+ case tok::string_literal: {
+ FilenameBuffer.resize(FilenameTok.getLength());
+ FilenameStart = &FilenameBuffer[0];
+ unsigned Len = getSpelling(FilenameTok, FilenameStart);
+ FilenameEnd = FilenameStart+Len;
+ break;
+ }
+
+ case tok::less:
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into FilenameBuffer and interpret those.
+ FilenameBuffer.push_back('<');
+ if (ConcatenateIncludeName(FilenameBuffer, *this))
+ return; // Found <eom> but no ">"? Diagnostic already emitted.
+ FilenameStart = FilenameBuffer.data();
+ FilenameEnd = FilenameStart + FilenameBuffer.size();
+ break;
+ default:
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ bool isAngled = GetIncludeFilenameSpelling(FilenameTok.getLocation(),
+ FilenameStart, FilenameEnd);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (FilenameStart == 0) {
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Verify that there is nothing after the filename, other than EOM. Note that
+ // we allow macros that expand to nothing after the filename, because this
+ // falls into the category of "#include pp-tokens new-line" specified in
+ // C99 6.10.2p4.
+ CheckEndOfDirective(IncludeTok.getIdentifierInfo()->getName(), true);
+
+ // Check that we don't have infinite #include recursion.
+ if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
+ Diag(FilenameTok, diag::err_pp_include_too_deep);
+ return;
+ }
+
+ // Search include directories.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File = LookupFile(FilenameStart, FilenameEnd,
+ isAngled, LookupFrom, CurDir);
+ if (File == 0) {
+ Diag(FilenameTok, diag::err_pp_file_not_found)
+ << std::string(FilenameStart, FilenameEnd);
+ return;
+ }
+
+ // Ask HeaderInfo if we should enter this #include file. If not, #including
+ // this file will have no effect.
+ if (!HeaderInfo.ShouldEnterIncludeFile(File, isImport))
+ return;
+
+ // The #included file will be considered to be a system header if either it is
+ // in a system include directory, or if the #includer is a system include
+ // header.
+ SrcMgr::CharacteristicKind FileCharacter =
+ std::max(HeaderInfo.getFileDirFlavor(File),
+ SourceMgr.getFileCharacteristic(FilenameTok.getLocation()));
+
+ // Look up the file, create a File ID for it.
+ FileID FID = SourceMgr.createFileID(File, FilenameTok.getLocation(),
+ FileCharacter);
+ if (FID.isInvalid()) {
+ Diag(FilenameTok, diag::err_pp_file_not_found)
+ << std::string(FilenameStart, FilenameEnd);
+ return;
+ }
+
+ // Finally, if all is good, enter the new file!
+ EnterSourceFile(FID, CurDir);
+}
+
+/// HandleIncludeNextDirective - Implements #include_next.
+///
+void Preprocessor::HandleIncludeNextDirective(Token &IncludeNextTok) {
+ Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
+
+ // #include_next is like #include, except that we start searching after
+ // the current found directory. If we can't do this, issue a
+ // diagnostic.
+ const DirectoryLookup *Lookup = CurDirLookup;
+ if (isInPrimaryFile()) {
+ Lookup = 0;
+ Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (Lookup == 0) {
+ Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return HandleIncludeDirective(IncludeNextTok, Lookup);
+}
+
+/// HandleImportDirective - Implements #import.
+///
+void Preprocessor::HandleImportDirective(Token &ImportTok) {
+ if (!Features.ObjC1) // #import is standard for ObjC.
+ Diag(ImportTok, diag::ext_pp_import_directive);
+
+ return HandleIncludeDirective(ImportTok, 0, true);
+}
+
+/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
+/// pseudo directive in the predefines buffer. This handles it by sucking all
+/// tokens through the preprocessor and discarding them (only keeping the side
+/// effects on the preprocessor).
+void Preprocessor::HandleIncludeMacrosDirective(Token &IncludeMacrosTok) {
+ // This directive should only occur in the predefines buffer. If not, emit an
+ // error and reject it.
+ SourceLocation Loc = IncludeMacrosTok.getLocation();
+ if (strcmp(SourceMgr.getBufferName(Loc), "<built-in>") != 0) {
+ Diag(IncludeMacrosTok.getLocation(),
+ diag::pp_include_macros_out_of_predefines);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Treat this as a normal #include for checking purposes. If this is
+ // successful, it will push a new lexer onto the include stack.
+ HandleIncludeDirective(IncludeMacrosTok, 0, false);
+
+ Token TmpTok;
+ do {
+ Lex(TmpTok);
+ assert(TmpTok.isNot(tok::eof) && "Didn't find end of -imacros!");
+ } while (TmpTok.isNot(tok::hashhash));
+}
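+
+// Assumed driver behavior, for illustration: -imacros foo.h is written into
+// the predefines buffer as something like
+//   #__include_macros "foo.h"
+// followed by a '##' marker token. The loop above pulls every token of foo.h
+// through the preprocessor, keeping only the side effects (macro definitions
+// and the like), and stops when it reaches that '##' marker.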
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Macro Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
+/// definition has just been read. Lex the rest of the arguments and the
+/// closing ), updating MI with what we learn. Return true if an error occurs
+/// parsing the arg list.
+bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
+ llvm::SmallVector<IdentifierInfo*, 32> Arguments;
+
+ Token Tok;
+ while (1) {
+ LexUnexpandedToken(Tok);
+ switch (Tok.getKind()) {
+ case tok::r_paren:
+ // Found the end of the argument list.
+ if (Arguments.empty()) // #define FOO()
+ return false;
+ // Otherwise we have #define FOO(A,)
+ Diag(Tok, diag::err_pp_expected_ident_in_arg_list);
+ return true;
+ case tok::ellipsis: // #define X(... -> C99 varargs
+ // Warn if use of C99 feature in non-C99 mode.
+ if (!Features.C99) Diag(Tok, diag::ext_variadic_macro);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+ // Add the __VA_ARGS__ identifier as an argument.
+ Arguments.push_back(Ident__VA_ARGS__);
+ MI->setIsC99Varargs();
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ case tok::eom: // #define X(
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ default:
+ // Handle keywords and identifiers here to accept things like
+ // #define Foo(for) for.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II == 0) {
+ // #define X(1
+ Diag(Tok, diag::err_pp_invalid_tok_in_arg_list);
+ return true;
+ }
+
+ // If this is already used as an argument, it is used multiple times (e.g.
+ // #define X(A,A.
+ if (std::find(Arguments.begin(), Arguments.end(), II) !=
+ Arguments.end()) { // C99 6.10.3p6
+ Diag(Tok, diag::err_pp_duplicate_name_in_arg_list) << II;
+ return true;
+ }
+
+ // Add the argument to the macro info.
+ Arguments.push_back(II);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+
+ switch (Tok.getKind()) {
+ default: // #define X(A B
+ Diag(Tok, diag::err_pp_expected_comma_in_arg_list);
+ return true;
+ case tok::r_paren: // #define X(A)
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ case tok::comma: // #define X(A,
+ break;
+ case tok::ellipsis: // #define X(A... -> GCC extension
+ // Diagnose extension.
+ Diag(Tok, diag::ext_named_variadic_macro);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+
+ MI->setIsGNUVarargs();
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ }
+ }
+ }
+}
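+
+// Argument lists this routine accepts or rejects (illustrative):
+//   #define F()        // empty list, ok
+//   #define F(a, b)    // ok
+//   #define F(a, ...)  // ok: C99 varargs, __VA_ARGS__ added as an argument
+//   #define F(a...)    // ok: GNU named varargs, diagnosed as an extension
+//   #define F(a, a)    // error: duplicate name in argument list
+//   #define F(a, 1)    // error: invalid token in argument list
+//   #define F(a,       // error: missing ')' in macro definition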
+
+/// HandleDefineDirective - Implements #define. This consumes the entire macro
+/// line then lets the caller lex the next real token.
+void Preprocessor::HandleDefineDirective(Token &DefineTok) {
+ ++NumDefined;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 1);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eom))
+ return;
+
+ Token LastTok = MacroNameTok;
+
+ // If we are supposed to keep comments in #defines, reenable comment saving
+ // mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepMacroComments);
+
+ // Create the new macro.
+ MacroInfo *MI = AllocateMacroInfo(MacroNameTok.getLocation());
+
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // If this is a function-like macro definition, parse the argument list,
+ // marking each of the identifiers as being used as macro arguments. Also,
+ // check other constraints on the first token of the macro body.
+ if (Tok.is(tok::eom)) {
+ // If there is no body to this macro, we have no special handling here.
+ } else if (Tok.hasLeadingSpace()) {
+ // This is a normal token with leading space. Clear the leading space
+ // marker on the first token to get proper expansion.
+ Tok.clearFlag(Token::LeadingSpace);
+ } else if (Tok.is(tok::l_paren)) {
+ // This is a function-like macro definition. Read the argument list.
+ MI->setIsFunctionLike();
+ if (ReadMacroDefinitionArgList(MI)) {
+ // Forget about MI.
+ ReleaseMacroInfo(MI);
+ // Throw away the rest of the line.
+ if (CurPPLexer->ParsingPreprocessorDirective)
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // If this is a definition of a variadic C99 function-like macro, not using
+ // the GNU named varargs extension, enable __VA_ARGS__.
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ assert(Ident__VA_ARGS__->isPoisoned() && "__VA_ARGS__ should be poisoned!");
+ if (MI->isC99Varargs())
+ Ident__VA_ARGS__->setIsPoisoned(false);
+
+ // Read the first token after the arg list for down below.
+ LexUnexpandedToken(Tok);
+ } else if (Features.C99) {
+ // C99 requires whitespace between the macro definition and the body. Emit
+ // a diagnostic for something like "#define X+".
+ Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
+ } else {
+ // C90 6.8 TC1 says: "In the definition of an object-like macro, if the
+ // first character of a replacement list is not a character required by
+ // subclause 5.2.1, then there shall be white-space separation between the
+ // identifier and the replacement list.". 5.2.1 lists this set:
+ // "A-Za-z0-9!"#%&'()*+,_./:;<=>?[\]^_{|}~" as well as whitespace, which
+ // is irrelevant here.
+ bool isInvalid = false;
+ if (Tok.is(tok::at)) // @ is not in the list above.
+ isInvalid = true;
+ else if (Tok.is(tok::unknown)) {
+ // If we have an unknown token, it is something strange like "`". Since
+ // all of the valid characters would have lexed into a single character
+ // token of some sort, we know this is not a valid case.
+ isInvalid = true;
+ }
+ if (isInvalid)
+ Diag(Tok, diag::ext_missing_whitespace_after_macro_name);
+ else
+ Diag(Tok, diag::warn_missing_whitespace_after_macro_name);
+ }
+
+ if (!Tok.is(tok::eom))
+ LastTok = Tok;
+
+ // Read the rest of the macro body.
+ if (MI->isObjectLike()) {
+ // Object-like macros are very simple, just read their body.
+ while (Tok.isNot(tok::eom)) {
+ LastTok = Tok;
+ MI->AddTokenToBody(Tok);
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+
+ } else {
+ // Otherwise, read the body of a function-like macro. While we are at it,
+ // check C99 6.10.3.2p1: ensure that # operators are followed by macro
+ // parameters in function-like macro expansions.
+ while (Tok.isNot(tok::eom)) {
+ LastTok = Tok;
+
+ if (Tok.isNot(tok::hash)) {
+ MI->AddTokenToBody(Tok);
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ continue;
+ }
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+
+ // Check for a valid macro arg identifier.
+ if (Tok.getIdentifierInfo() == 0 ||
+ MI->getArgumentNum(Tok.getIdentifierInfo()) == -1) {
+
+ // If this is assembler-with-cpp mode, we accept random gibberish after
+ // the '#' because '#' is often a comment character. However, change
+ // the kind of the token to tok::unknown so that the preprocessor isn't
+ // confused.
+ if (getLangOptions().AsmPreprocessor && Tok.isNot(tok::eom)) {
+ LastTok.setKind(tok::unknown);
+ } else {
+ Diag(Tok, diag::err_pp_stringize_not_parameter);
+ ReleaseMacroInfo(MI);
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+ return;
+ }
+ }
+
+ // Things look ok, add the '#' and param name tokens to the macro.
+ MI->AddTokenToBody(LastTok);
+ MI->AddTokenToBody(Tok);
+ LastTok = Tok;
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+ }
+
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+
+ // Check that there is no paste (##) operator at the beginning or end of the
+ // replacement list.
+ unsigned NumTokens = MI->getNumTokens();
+ if (NumTokens != 0) {
+ if (MI->getReplacementToken(0).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
+ ReleaseMacroInfo(MI);
+ return;
+ }
+ if (MI->getReplacementToken(NumTokens-1).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
+ ReleaseMacroInfo(MI);
+ return;
+ }
+ }
+
+ // If this is the primary source file, remember that this macro hasn't been
+ // used yet.
+ if (isInPrimaryFile())
+ MI->setIsUsed(false);
+
+ MI->setDefinitionEndLoc(LastTok.getLocation());
+
+ // Finally, if this identifier already had a macro defined for it, verify that
+ // the macro bodies are identical and free the old definition.
+ if (MacroInfo *OtherMI = getMacroInfo(MacroNameTok.getIdentifierInfo())) {
+ // It is very common for system headers to have tons of macro redefinitions
+ // and for warnings to be disabled in system headers. If this is the case,
+ // then don't bother calling MacroInfo::isIdenticalTo.
+ if (!getDiagnostics().getSuppressSystemWarnings() ||
+ !SourceMgr.isInSystemHeader(DefineTok.getLocation())) {
+ if (!OtherMI->isUsed())
+ Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ // Macros must be identical. This means all tokens and whitespace
+ // separation must be the same. C99 6.10.3.2.
+ if (!MI->isIdenticalTo(*OtherMI, *this)) {
+ Diag(MI->getDefinitionLoc(), diag::ext_pp_macro_redef)
+ << MacroNameTok.getIdentifierInfo();
+ Diag(OtherMI->getDefinitionLoc(), diag::note_previous_definition);
+ }
+ }
+
+ ReleaseMacroInfo(OtherMI);
+ }
+
+ setMacroInfo(MacroNameTok.getIdentifierInfo(), MI);
+
+ // If the callbacks want to know, tell them about the macro definition.
+ if (Callbacks)
+ Callbacks->MacroDefined(MacroNameTok.getIdentifierInfo(), MI);
+}
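+
+// The constraints checked above, shown on small examples (illustrative):
+//   #define S(x) #x   // ok: '#' is followed by a macro parameter
+//   #define S(x) #y   // err_pp_stringize_not_parameter (C99 6.10.3.2p1)
+//   #define P(x) ## x // err_paste_at_start
+//   #define P(x) x ## // err_paste_at_end
+//   #define X 1
+//   #define X 2       // ext_pp_macro_redef + note_previous_definition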
+
+/// HandleUndefDirective - Implements #undef.
+///
+void Preprocessor::HandleUndefDirective(Token &UndefTok) {
+ ++NumUndefined;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 2);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eom))
+ return;
+
+ // Check to see if this is the last token on the #undef line.
+ CheckEndOfDirective("undef");
+
+ // Okay, we finally have a valid identifier to undef.
+ MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+
+ // If the macro is not defined, this is a noop undef, just return.
+ if (MI == 0) return;
+
+ if (!MI->isUsed())
+ Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ // If the callbacks want to know, tell them about the macro #undef.
+ if (Callbacks)
+ Callbacks->MacroUndefined(MacroNameTok.getIdentifierInfo(), MI);
+
+ // Free macro definition.
+ ReleaseMacroInfo(MI);
+ setMacroInfo(MacroNameTok.getIdentifierInfo(), 0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Conditional Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandleIfdefDirective - Implements the #ifdef/#ifndef directive. isIfndef is
+/// true when this is a #ifndef directive. ReadAnyTokensBeforeDirective is true
+/// if any tokens have been returned or pp-directives activated before this
+/// #ifndef has been lexed.
+///
+void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+ Token DirectiveTok = Result;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eom)) {
+ // Skip code until we get to #endif. This helps with recovery by not
+ // emitting an error when the #endif is reached.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false, /*FoundElse*/false);
+ return;
+ }
+
+ // Check to see if this is the last token on the #if[n]def line.
+ CheckEndOfDirective(isIfndef ? "ifndef" : "ifdef");
+
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ // If this is the start of a top-level #ifdef/#ifndef, inform MIOpt.
+ if (!ReadAnyTokensBeforeDirective) {
+ assert(isIfndef && "#ifdef shouldn't reach here");
+ CurPPLexer->MIOpt.EnterTopLevelIFNDEF(MacroNameTok.getIdentifierInfo());
+ } else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
+ MacroInfo *MI = getMacroInfo(MII);
+
+ // If there is a macro, process it.
+ if (MI) // Mark it used.
+ MI->setIsUsed(true);
+
+ // Should we include the stuff contained by this directive?
+ if (!MI == isIfndef) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(DirectiveTok.getLocation(), /*wasskip*/false,
+ /*foundnonskip*/true, /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block and return the first token after it.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
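+
+// The MIOpt bookkeeping above recognizes the standard include-guard idiom
+// (illustrative):
+//
+//   #ifndef FOO_H   // first directive in the file, no tokens before it
+//   #define FOO_H
+//   ...
+//   #endif          // nothing but whitespace/comments may follow
+//
+// When that pattern holds, a later #include of the same header can typically
+// be skipped without re-lexing the file.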
+
+/// HandleIfDirective - Implements the #if directive.
+///
+void Preprocessor::HandleIfDirective(Token &IfToken,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+
+ // Parse and evaluate the conditional expression.
+ IdentifierInfo *IfNDefMacro = 0;
+ bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
+
+
+ // If this condition is equivalent to #ifndef X, and if this is the first
+ // directive seen, handle it for the multiple-include optimization.
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ if (!ReadAnyTokensBeforeDirective && IfNDefMacro)
+ CurPPLexer->MIOpt.EnterTopLevelIFNDEF(IfNDefMacro);
+ else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ // Should we include the stuff contained by this directive?
+ if (ConditionalTrue) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(IfToken.getLocation(), /*wasskip*/false,
+ /*foundnonskip*/true, /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block and return the first token after it.
+ SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
+
+/// HandleEndifDirective - Implements the #endif directive.
+///
+void Preprocessor::HandleEndifDirective(Token &EndifToken) {
+ ++NumEndif;
+
+ // Check that this is the whole directive.
+ CheckEndOfDirective("endif");
+
+ PPConditionalInfo CondInfo;
+ if (CurPPLexer->popConditionalLevel(CondInfo)) {
+ // No conditionals on the stack: this is an #endif without an #if.
+ Diag(EndifToken, diag::err_pp_endif_without_if);
+ return;
+ }
+
+ // If this #endif ends a top-level conditional, inform MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.ExitTopLevelConditional();
+
+ assert(!CondInfo.WasSkipping && !CurPPLexer->LexingRawMode &&
+ "This code should only be reachable in the non-skipping case!");
+}
+
+
+void Preprocessor::HandleElseDirective(Token &Result) {
+ ++NumElse;
+
+ // #else directive in a non-skipping conditional... start skipping.
+ CheckEndOfDirective("else");
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(Result, diag::pp_err_else_without_if);
+ return;
+ }
+
+ // If this is a top-level #else, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
+
+ // Finally, skip the rest of the contents of this block and return the first
+ // token after it.
+ return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/true);
+}
+
+void Preprocessor::HandleElifDirective(Token &ElifToken) {
+ ++NumElse;
+
+ // #elif directive in a non-skipping conditional... start skipping.
+ // We don't care what the condition is, because we will always skip it (since
+ // the block immediately before it was included).
+ DiscardUntilEndOfDirective();
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(ElifToken, diag::pp_err_elif_without_if);
+ return;
+ }
+
+ // If this is a top-level #elif, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
+
+ // Finally, skip the rest of the contents of this block and return the first
+ // token after it.
+ return SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/CI.FoundElse);
+}
+
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
new file mode 100644
index 0000000..709e316
--- /dev/null
+++ b/lib/Lex/PPExpressions.cpp
@@ -0,0 +1,717 @@
+//===--- PPExpressions.cpp - Preprocessor Expression Evaluation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor::EvaluateDirectiveExpression method,
+// which parses and evaluates integer constant expressions for #if directives.
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: implement testing for #assert's.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/APSInt.h"
+using namespace clang;
+
+/// PPValue - Represents the value of a subexpression of a preprocessor
+/// conditional and the source range covered by it.
+class PPValue {
+ SourceRange Range;
+public:
+ llvm::APSInt Val;
+
+ // Default ctor - Construct an 'invalid' PPValue.
+ PPValue(unsigned BitWidth) : Val(BitWidth) {}
+
+ unsigned getBitWidth() const { return Val.getBitWidth(); }
+ bool isUnsigned() const { return Val.isUnsigned(); }
+
+ const SourceRange &getRange() const { return Range; }
+
+ void setRange(SourceLocation L) { Range.setBegin(L); Range.setEnd(L); }
+ void setRange(SourceLocation B, SourceLocation E) {
+ Range.setBegin(B); Range.setEnd(E);
+ }
+ void setBegin(SourceLocation L) { Range.setBegin(L); }
+ void setEnd(SourceLocation L) { Range.setEnd(L); }
+};
+
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP);
+
+/// DefinedTracker - This struct is used while parsing expressions to keep track
+/// of whether !defined(X) has been seen.
+///
+/// With this simple scheme, we handle the basic forms:
+/// !defined(X) and !defined X
+/// but we also trivially handle (silly) stuff like:
+/// !!!defined(X) and +!defined(X) and !+!+!defined(X) and !(defined(X)).
+struct DefinedTracker {
+ /// Each time a Value is evaluated, it returns information about whether the
+ /// parsed value is of the form defined(X), !defined(X) or is something else.
+ enum TrackerState {
+ DefinedMacro, // defined(X)
+ NotDefinedMacro, // !defined(X)
+ Unknown // Something else.
+ } State;
+ /// TheMacro - When the state is DefinedMacro or NotDefinedMacro, this
+ /// indicates the macro that was checked.
+ IdentifierInfo *TheMacro;
+};
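+
+// Illustration (hypothetical macro name): for a guard expression such as
+//
+//   #if !defined(FOO_H)
+//
+// EvaluateValue below reports State == DefinedMacro with TheMacro == FOO_H for
+// the 'defined' operand, and the '!' handler flips that to NotDefinedMacro, so
+// the directive can be treated like "#ifndef FOO_H" for the multiple-include
+// optimization.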
+
+/// EvaluateValue - Evaluate the token PeekTok (and any others needed) and
+/// return the computed value in Result. Return true if there was an error
+/// parsing. This function also returns information about the form of the
+/// expression in DT. See above for information on what DT means.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation.
+static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
+ bool ValueLive, Preprocessor &PP) {
+ DT.State = DefinedTracker::Unknown;
+
+ // If this token's spelling is a pp-identifier, check to see if it is
+ // 'defined' or if it is a macro. Note that we check here because many
+ // keywords are pp-identifiers, so we can't check the kind.
+ if (IdentifierInfo *II = PeekTok.getIdentifierInfo()) {
+ // If this identifier isn't 'defined' and it wasn't macro expanded, it turns
+ // into a simple 0, unless it is the C++ keyword "true", in which case it
+ // turns into "1".
+ if (!II->isStr("defined")) {
+ if (ValueLive)
+ PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
+ Result.Val = II->getTokenID() == tok::kw_true;
+ Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+
+ // Handle "defined X" and "defined(X)".
+ Result.setBegin(PeekTok.getLocation());
+
+ // Get the next token, don't expand it.
+ PP.LexUnexpandedToken(PeekTok);
+
+ // Two options, it can either be a pp-identifier or a (.
+ SourceLocation LParenLoc;
+ if (PeekTok.is(tok::l_paren)) {
+ // Found a paren, remember we saw it and skip it.
+ LParenLoc = PeekTok.getLocation();
+ PP.LexUnexpandedToken(PeekTok);
+ }
+
+ // If we don't have a pp-identifier now, this is an error.
+ if ((II = PeekTok.getIdentifierInfo()) == 0) {
+ PP.Diag(PeekTok, diag::err_pp_defined_requires_identifier);
+ return true;
+ }
+
+ // Otherwise, we got an identifier, is it defined to something?
+ Result.Val = II->hasMacroDefinition();
+ Result.Val.setIsUnsigned(false); // Result is signed intmax_t.
+
+ // If there is a macro, mark it used.
+ if (Result.Val != 0 && ValueLive) {
+ MacroInfo *Macro = PP.getMacroInfo(II);
+ Macro->setIsUsed(true);
+ }
+
+ // Consume identifier.
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+
+ // If we are in parens, ensure we have a trailing ).
+ if (LParenLoc.isValid()) {
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_missing_rparen);
+ PP.Diag(LParenLoc, diag::note_matching) << "(";
+ return true;
+ }
+ // Consume the ).
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ }
+
+ // Success, remember that we saw defined(X).
+ DT.State = DefinedTracker::DefinedMacro;
+ DT.TheMacro = II;
+ return false;
+ }
+
+ switch (PeekTok.getKind()) {
+ default: // Non-value token.
+ PP.Diag(PeekTok, diag::err_pp_expr_bad_token_start_expr);
+ return true;
+ case tok::eom:
+ case tok::r_paren:
+ // If there is no expression, report and exit.
+ PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
+ return true;
+ case tok::numeric_constant: {
+ llvm::SmallString<64> IntegerBuffer;
+ IntegerBuffer.resize(PeekTok.getLength());
+ const char *ThisTokBegin = &IntegerBuffer[0];
+ unsigned ActualLength = PP.getSpelling(PeekTok, ThisTokBegin);
+ NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ PeekTok.getLocation(), PP);
+ if (Literal.hadError)
+ return true; // a diagnostic was already reported.
+
+ if (Literal.isFloatingLiteral() || Literal.isImaginary) {
+ PP.Diag(PeekTok, diag::err_pp_illegal_floating_literal);
+ return true;
+ }
+ assert(Literal.isIntegerLiteral() && "Unknown ppnumber");
+
+ // long long is a C99 feature.
+ if (!PP.getLangOptions().C99 && !PP.getLangOptions().CPlusPlus0x
+ && Literal.isLongLong)
+ PP.Diag(PeekTok, diag::ext_longlong);
+
+ // Parse the integer literal into Result.
+ if (Literal.GetIntegerValue(Result.Val)) {
+ // Overflow parsing integer literal.
+ if (ValueLive) PP.Diag(PeekTok, diag::warn_integer_too_large);
+ Result.Val.setIsUnsigned(true);
+ } else {
+ // Set the signedness of the result to match whether there was a U suffix
+ // or not.
+ Result.Val.setIsUnsigned(Literal.isUnsigned);
+
+ // Detect overflow based on whether the value is signed. If signed
+ // and if the value is too large, emit a warning "integer constant is so
+ // large that it is unsigned" e.g. on 12345678901234567890 where intmax_t
+ // is 64-bits.
+ if (!Literal.isUnsigned && Result.Val.isNegative()) {
+ // Don't warn for a hex literal: 0x8000..0 shouldn't warn.
+ if (ValueLive && Literal.getRadix() != 16)
+ PP.Diag(PeekTok, diag::warn_integer_too_large_for_signed);
+ Result.Val.setIsUnsigned(true);
+ }
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::char_constant: { // 'x'
+ llvm::SmallString<32> CharBuffer;
+ CharBuffer.resize(PeekTok.getLength());
+ const char *ThisTokBegin = &CharBuffer[0];
+ unsigned ActualLength = PP.getSpelling(PeekTok, ThisTokBegin);
+ CharLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ PeekTok.getLocation(), PP);
+ if (Literal.hadError())
+ return true; // A diagnostic was already emitted.
+
+ // Character literals are always int or wchar_t, expand to intmax_t.
+ TargetInfo &TI = PP.getTargetInfo();
+ unsigned NumBits;
+ if (Literal.isMultiChar())
+ NumBits = TI.getIntWidth();
+ else
+ NumBits = TI.getCharWidth(Literal.isWide());
+
+ // Set the width.
+ llvm::APSInt Val(NumBits);
+ // Set the value.
+ Val = Literal.getValue();
+ // Set the signedness.
+ Val.setIsUnsigned(!TI.isCharSigned());
+
+ if (Result.Val.getBitWidth() > Val.getBitWidth()) {
+ Result.Val = Val.extend(Result.Val.getBitWidth());
+ } else {
+ assert(Result.Val.getBitWidth() == Val.getBitWidth() &&
+ "intmax_t smaller than char/wchar_t?");
+ Result.Val = Val;
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::l_paren: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok); // Eat the (.
+ // Parse the value and if there are any binary operators involved, parse
+ // them.
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+
+ // If this is a silly value like (X), which doesn't need parens, check for
+ // !(defined X).
+ if (PeekTok.is(tok::r_paren)) {
+ // Just use DT unmodified as our result.
+ } else {
+ // Otherwise, we have something like (x+y), and we consumed '(x'.
+ if (EvaluateDirectiveSubExpr(Result, 1, PeekTok, ValueLive, PP))
+ return true;
+
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_rparen)
+ << Result.getRange();
+ PP.Diag(Start, diag::note_matching) << "(";
+ return true;
+ }
+ DT.State = DefinedTracker::Unknown;
+ }
+ Result.setRange(Start, PeekTok.getLocation());
+ PP.LexNonComment(PeekTok); // Eat the ).
+ return false;
+ }
+ case tok::plus: {
+ SourceLocation Start = PeekTok.getLocation();
+ // Unary plus doesn't modify the value.
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ return false;
+ }
+ case tok::minus: {
+ SourceLocation Loc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Loc);
+
+ // C99 6.5.3.3p3: The sign of the result matches the sign of the operand.
+ Result.Val = -Result.Val;
+
+ // -MININT is the only thing that overflows. Unsigned never overflows.
+ bool Overflow = !Result.isUnsigned() && Result.Val.isMinSignedValue();
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(Loc, diag::warn_pp_expr_overflow) << Result.getRange();
+
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::tilde: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+
+ // C99 6.5.3.3p4: The sign of the result matches the sign of the operand.
+ Result.Val = ~Result.Val;
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::exclaim: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ Result.Val = !Result.Val;
+ // C99 6.5.3.3p5: The sign of the result is 'int', aka it is signed.
+ Result.Val.setIsUnsigned(false);
+
+ if (DT.State == DefinedTracker::DefinedMacro)
+ DT.State = DefinedTracker::NotDefinedMacro;
+ else if (DT.State == DefinedTracker::NotDefinedMacro)
+ DT.State = DefinedTracker::DefinedMacro;
+ return false;
+ }
+
+ // FIXME: Handle #assert
+ }
+}
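+
+// Illustration (hypothetical example): EvaluateValue handles the primary
+// expressions of a directive, so in
+//
+//   #if 'A' == 65 && 0x10 == 16
+//
+// each of 'A', 65, 0x10 and 16 is parsed above into a PPValue whose APSInt is
+// as wide as the target's intmax_t; the binary operators are then folded by
+// EvaluateDirectiveSubExpr below.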
+
+
+
+/// getPrecedence - Return the precedence of the specified binary operator
+/// token. This returns:
+/// ~0 - Invalid token.
+/// 14 -> 3 - various operators.
+/// 0 - 'eom' or ')'
+static unsigned getPrecedence(tok::TokenKind Kind) {
+ switch (Kind) {
+ default: return ~0U;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return 14;
+ case tok::plus:
+ case tok::minus: return 13;
+ case tok::lessless:
+ case tok::greatergreater: return 12;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal:
+ case tok::greater: return 11;
+ case tok::exclaimequal:
+ case tok::equalequal: return 10;
+ case tok::amp: return 9;
+ case tok::caret: return 8;
+ case tok::pipe: return 7;
+ case tok::ampamp: return 6;
+ case tok::pipepipe: return 5;
+ case tok::question: return 4;
+ case tok::comma: return 3;
+ case tok::colon: return 2;
+ case tok::r_paren: return 0; // Lowest priority, end of expr.
+ case tok::eom: return 0; // Lowest priority, end of macro.
+ }
+}
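+
+// Illustration (hypothetical example): with the table above,
+//
+//   #if 1 + 2 * 3 == 7
+//
+// parses as ((1 + (2 * 3)) == 7), because '*' (14) binds tighter than '+' (13),
+// which in turn binds tighter than '==' (10).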
+
+
+/// EvaluateDirectiveSubExpr - Evaluate the subexpression whose first token is
+/// PeekTok, consuming operators whose precedence is at least MinPrec. This
+/// returns the result in LHS.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation, such as division by zero warnings.
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP) {
+ unsigned PeekPrec = getPrecedence(PeekTok.getKind());
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << LHS.getRange();
+ return true;
+ }
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse, return
+ // it so that higher levels of the recursion can parse it.
+ if (PeekPrec < MinPrec)
+ return false;
+
+ tok::TokenKind Operator = PeekTok.getKind();
+
+ // If this is a short-circuiting operator, see if the RHS of the operator is
+ // dead. Note that this cannot just clobber ValueLive. Consider
+ // "0 && 1 ? 4 : 1 / 0", which is parsed as "(0 && 1) ? 4 : (1 / 0)". In
+ // this example, the RHS of the && being dead does not make the rest of the
+ // expr dead.
+ bool RHSIsLive;
+ if (Operator == tok::ampamp && LHS.Val == 0)
+ RHSIsLive = false; // RHS of "0 && x" is dead.
+ else if (Operator == tok::pipepipe && LHS.Val != 0)
+ RHSIsLive = false; // RHS of "1 || x" is dead.
+ else if (Operator == tok::question && LHS.Val == 0)
+ RHSIsLive = false; // RHS (x) of "0 ? x : y" is dead.
+ else
+ RHSIsLive = ValueLive;
+
+ // Consume the operator, remembering the operator's location for reporting.
+ SourceLocation OpLoc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+
+ PPValue RHS(LHS.getBitWidth());
+ // Parse the RHS of the operator.
+ DefinedTracker DT;
+ if (EvaluateValue(RHS, PeekTok, DT, RHSIsLive, PP)) return true;
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ unsigned ThisPrec = PeekPrec;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << RHS.getRange();
+ return true;
+ }
+
+ // Decide whether to include the next binop in this subexpression. For
+ // example, when parsing x+y*z and looking at '*', we want to recursively
+ // handle y*z as a single subexpression. We do this because the precedence
+ // of * is higher than that of +. The only strange case we have to handle
+ // here is for the ?: operator, where the precedence is actually lower than
+ // the LHS of the '?'. The grammar rule is:
+ //
+ // conditional-expression ::=
+ // logical-OR-expression ? expression : conditional-expression
+ // where 'expression' is actually comma-expression.
+ unsigned RHSPrec;
+ if (Operator == tok::question)
+ // The RHS of "?" should be maximally consumed as an expression.
+ RHSPrec = getPrecedence(tok::comma);
+ else // All others should munch while higher precedence.
+ RHSPrec = ThisPrec+1;
+
+ if (PeekPrec >= RHSPrec) {
+ if (EvaluateDirectiveSubExpr(RHS, RHSPrec, PeekTok, RHSIsLive, PP))
+ return true;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ }
+ assert(PeekPrec <= ThisPrec && "Recursion didn't work!");
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ llvm::APSInt Res(LHS.getBitWidth());
+ switch (Operator) {
+ case tok::question: // No UAC for x and y in "x ? y : z".
+ case tok::lessless: // Shift amount doesn't UAC with shift value.
+ case tok::greatergreater: // Shift amount doesn't UAC with shift value.
+ case tok::comma: // Comma operands are not subject to UACs.
+ case tok::pipepipe: // Logical || does not do UACs.
+ case tok::ampamp: // Logical && does not do UACs.
+ break; // No UAC
+ default:
+ Res.setIsUnsigned(LHS.isUnsigned()|RHS.isUnsigned());
+ // If this just promoted something from signed to unsigned, and if the
+ // value was negative, warn about it.
+ if (ValueLive && Res.isUnsigned()) {
+ if (!LHS.isUnsigned() && LHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_lhs_to_positive)
+ << LHS.Val.toString(10, true) + " to " +
+ LHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ if (!RHS.isUnsigned() && RHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_rhs_to_positive)
+ << RHS.Val.toString(10, true) + " to " +
+ RHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ }
+ LHS.Val.setIsUnsigned(Res.isUnsigned());
+ RHS.Val.setIsUnsigned(Res.isUnsigned());
+ }
+
+ // FIXME: All of these should detect and report overflow??
+ bool Overflow = false;
+ switch (Operator) {
+ default: assert(0 && "Unknown operator token!");
+ case tok::percent:
+ if (RHS.Val != 0)
+ Res = LHS.Val % RHS.Val;
+ else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_remainder_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+ case tok::slash:
+ if (RHS.Val != 0) {
+ Res = LHS.Val / RHS.Val;
+ if (LHS.Val.isSigned()) // MININT/-1 --> overflow.
+ Overflow = LHS.Val.isMinSignedValue() && RHS.Val.isAllOnesValue();
+ } else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_division_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+
+ case tok::star:
+ Res = LHS.Val * RHS.Val;
+ if (Res.isSigned() && LHS.Val != 0 && RHS.Val != 0)
+ Overflow = Res/RHS.Val != LHS.Val || Res/LHS.Val != RHS.Val;
+ break;
+ case tok::lessless: {
+ // Determine whether overflow is about to happen.
+ unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
+ if (ShAmt >= LHS.Val.getBitWidth())
+ Overflow = true, ShAmt = LHS.Val.getBitWidth()-1;
+ else if (LHS.isUnsigned())
+ Overflow = false;
+ else if (LHS.Val.isNonNegative()) // Don't allow sign change.
+ Overflow = ShAmt >= LHS.Val.countLeadingZeros();
+ else
+ Overflow = ShAmt >= LHS.Val.countLeadingOnes();
+
+ Res = LHS.Val << ShAmt;
+ break;
+ }
+ case tok::greatergreater: {
+ // Determine whether overflow is about to happen.
+ unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
+ if (ShAmt >= LHS.getBitWidth())
+ Overflow = true, ShAmt = LHS.getBitWidth()-1;
+ Res = LHS.Val >> ShAmt;
+ break;
+ }
+ case tok::plus:
+ Res = LHS.Val + RHS.Val;
+ if (LHS.isUnsigned())
+ Overflow = false;
+ else if (LHS.Val.isNonNegative() == RHS.Val.isNonNegative() &&
+ Res.isNonNegative() != LHS.Val.isNonNegative())
+ Overflow = true; // Overflow for signed addition.
+ break;
+ case tok::minus:
+ Res = LHS.Val - RHS.Val;
+ if (LHS.isUnsigned())
+ Overflow = false;
+ else if (LHS.Val.isNonNegative() != RHS.Val.isNonNegative() &&
+ Res.isNonNegative() != LHS.Val.isNonNegative())
+ Overflow = true; // Overflow for signed subtraction.
+ break;
+ case tok::lessequal:
+ Res = LHS.Val <= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::less:
+ Res = LHS.Val < RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greaterequal:
+ Res = LHS.Val >= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greater:
+ Res = LHS.Val > RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::exclaimequal:
+ Res = LHS.Val != RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::equalequal:
+ Res = LHS.Val == RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::amp:
+ Res = LHS.Val & RHS.Val;
+ break;
+ case tok::caret:
+ Res = LHS.Val ^ RHS.Val;
+ break;
+ case tok::pipe:
+ Res = LHS.Val | RHS.Val;
+ break;
+ case tok::ampamp:
+ Res = (LHS.Val != 0 && RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.13p3, result is always int (signed)
+ break;
+ case tok::pipepipe:
+ Res = (LHS.Val != 0 || RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.14p3, result is always int (signed)
+ break;
+ case tok::comma:
+ // Comma is invalid in pp expressions in c89/c++ mode, but is valid in C99
+ // if not being evaluated.
+ if (!PP.getLangOptions().C99 || ValueLive)
+ PP.Diag(OpLoc, diag::ext_pp_comma_expr)
+ << LHS.getRange() << RHS.getRange();
+ Res = RHS.Val; // LHS = LHS,RHS -> RHS.
+ break;
+ case tok::question: {
+ // Parse the : part of the expression.
+ if (PeekTok.isNot(tok::colon)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_expected_colon)
+ << LHS.getRange() << RHS.getRange();
+ PP.Diag(OpLoc, diag::note_matching) << "?";
+ return true;
+ }
+ // Consume the :.
+ PP.LexNonComment(PeekTok);
+
+ // Evaluate the value after the :.
+ bool AfterColonLive = ValueLive && LHS.Val == 0;
+ PPValue AfterColonVal(LHS.getBitWidth());
+ DefinedTracker DT;
+ if (EvaluateValue(AfterColonVal, PeekTok, DT, AfterColonLive, PP))
+ return true;
+
+ // Parse anything after the : with the same precedence as ?. We allow
+ // things of equal precedence because ?: is right associative.
+ if (EvaluateDirectiveSubExpr(AfterColonVal, ThisPrec,
+ PeekTok, AfterColonLive, PP))
+ return true;
+
+ // Now that we have the condition, the LHS and the RHS of the :, evaluate.
+ Res = LHS.Val != 0 ? RHS.Val : AfterColonVal.Val;
+ RHS.setEnd(AfterColonVal.getRange().getEnd());
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ Res.setIsUnsigned(RHS.isUnsigned() | AfterColonVal.isUnsigned());
+
+ // Figure out the precedence of the token after the : part.
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ break;
+ }
+ case tok::colon:
+ // Don't allow :'s to float around without being part of ?: exprs.
+ PP.Diag(OpLoc, diag::err_pp_colon_without_question)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(OpLoc, diag::warn_pp_expr_overflow)
+ << LHS.getRange() << RHS.getRange();
+
+ // Put the result back into 'LHS' for our next iteration.
+ LHS.Val = Res;
+ LHS.setEnd(RHS.getRange().getEnd());
+ }
+
+ return false;
+}
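+
+// Illustration (hypothetical macro name): because a dead short-circuit RHS is
+// evaluated with ValueLive == false,
+//
+//   #if defined(DIVISOR) && 100 / DIVISOR > 3
+//
+// produces no division-by-zero error when DIVISOR is undefined: the '&&' LHS
+// is 0, so the RHS is still parsed but its evaluation diagnostics are
+// suppressed.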
+
+/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
+/// may occur after a #if or #elif directive. If the expression is equivalent
+/// to "!defined(X)" return X in IfNDefMacro.
+bool Preprocessor::
+EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
+ // Peek ahead one token.
+ Token Tok;
+ Lex(Tok);
+
+ // C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
+ unsigned BitWidth = getTargetInfo().getIntMaxTWidth();
+
+ PPValue ResVal(BitWidth);
+ DefinedTracker DT;
+ if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eom))
+ DiscardUntilEndOfDirective();
+ return false;
+ }
+
+ // If we are at the end of the expression after just parsing a value, there
+ // must be no (unparenthesized) binary operators involved, so we can exit
+ // directly.
+ if (Tok.is(tok::eom)) {
+ // If the expression we parsed was of the form !defined(macro), return the
+ // macro in IfNDefMacro.
+ if (DT.State == DefinedTracker::NotDefinedMacro)
+ IfNDefMacro = DT.TheMacro;
+
+ return ResVal.Val != 0;
+ }
+
+ // Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
+ // operator and the stuff after it.
+ if (EvaluateDirectiveSubExpr(ResVal, getPrecedence(tok::question),
+ Tok, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eom))
+ DiscardUntilEndOfDirective();
+ return false;
+ }
+
+ // If we aren't at the tok::eom token, something bad happened, like an extra
+ // ')' token.
+ if (Tok.isNot(tok::eom)) {
+ Diag(Tok, diag::err_pp_expected_eol);
+ DiscardUntilEndOfDirective();
+ }
+
+ return ResVal.Val != 0;
+}
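+
+// Illustration (hypothetical macro name): for the common include guard
+//
+//   #if !defined(FOO_H)
+//   #define FOO_H
+//   ...
+//   #endif
+//
+// the expression evaluates to 1 on first inclusion, and FOO_H is handed back
+// through IfNDefMacro so the caller can apply the same multiple-include
+// optimization it uses for #ifndef.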
+
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
new file mode 100644
index 0000000..2a05ba3
--- /dev/null
+++ b/lib/Lex/PPLexerChange.cpp
@@ -0,0 +1,345 @@
+//===--- PPLexerChange.cpp - Handle changing lexers in the preprocessor ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// current lexer stack.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+using namespace clang;
+
+PPCallbacks::~PPCallbacks() {}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Methods.
+//===----------------------------------------------------------------------===//
+
+/// isInPrimaryFile - Return true if we're in the top-level file, not in a
+/// #include. This looks through macro expansions and active _Pragma lexers.
+bool Preprocessor::isInPrimaryFile() const {
+ if (IsFileLexer())
+ return IncludeMacroStack.empty();
+
+ // If there are any stacked lexers, we're in a #include.
+ assert(IsFileLexer(IncludeMacroStack[0]) &&
+ "Top level include stack isn't our primary lexer?");
+ for (unsigned i = 1, e = IncludeMacroStack.size(); i != e; ++i)
+ if (IsFileLexer(IncludeMacroStack[i]))
+ return false;
+ return true;
+}
+
+/// getCurrentFileLexer - Return the current file lexer being lexed from. Note
+/// that this ignores any potentially active macro expansions and _Pragma
+/// expansions going on at the time.
+PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
+ if (IsFileLexer())
+ return CurPPLexer;
+
+ // Look for a stacked lexer.
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ const IncludeStackInfo& ISI = IncludeMacroStack[i-1];
+ if (IsFileLexer(ISI))
+ return ISI.ThePPLexer;
+ }
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods for Entering and Callbacks for leaving various contexts
+//===----------------------------------------------------------------------===//
+
+/// EnterSourceFile - Add a source file to the top of the include stack and
+/// start lexing tokens from it instead of the current buffer.
+void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir) {
+ assert(CurTokenLexer == 0 && "Cannot #include a file inside a macro!");
+ ++NumEnteredSourceFiles;
+
+ if (MaxIncludeStackDepth < IncludeMacroStack.size())
+ MaxIncludeStackDepth = IncludeMacroStack.size();
+
+ if (PTH) {
+ if (PTHLexer *PL = PTH->CreateLexer(FID))
+ return EnterSourceFileWithPTH(PL, CurDir);
+ }
+ EnterSourceFileWithLexer(new Lexer(FID, *this), CurDir);
+}
+
+/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
+/// and start lexing tokens from it instead of the current buffer.
+void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
+ const DirectoryLookup *CurDir) {
+
+ // Add the current lexer to the include stack.
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurLexer.reset(TheLexer);
+ CurPPLexer = TheLexer;
+ CurDirLookup = CurDir;
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !CurLexer->Is_PragmaLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurLexer->getFileLoc());
+
+ Callbacks->FileChanged(CurLexer->getFileLoc(),
+ PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
+/// and start getting tokens from it using the PTH cache.
+void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
+ const DirectoryLookup *CurDir) {
+
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurDirLookup = CurDir;
+ CurPTHLexer.reset(PL);
+ CurPPLexer = CurPTHLexer.get();
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks) {
+ FileID FID = CurPPLexer->getFileID();
+ SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(EnterLoc);
+ Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterMacro - Add a Macro to the top of the include stack and start lexing
+/// tokens from it instead of the current buffer.
+void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
+ MacroArgs *Args) {
+ PushIncludeMacroStack();
+ CurDirLookup = 0;
+
+ if (NumCachedTokenLexers == 0) {
+ CurTokenLexer.reset(new TokenLexer(Tok, ILEnd, Args, *this));
+ } else {
+ CurTokenLexer.reset(TokenLexerCache[--NumCachedTokenLexers]);
+ CurTokenLexer->Init(Tok, ILEnd, Args);
+ }
+}
+
+/// EnterTokenStream - Add a "macro" context to the top of the include stack,
+/// which will cause the lexer to start returning the specified tokens.
+///
+/// If DisableMacroExpansion is true, tokens lexed from the token stream will
+/// not be subject to further macro expansion. Otherwise, these tokens will
+/// be re-macro-expanded when/if expansion is enabled.
+///
+/// If OwnsTokens is false, this method assumes that the specified stream of
+/// tokens has a permanent owner somewhere, so they do not need to be copied.
+/// If it is true, it assumes the array of tokens is allocated with new[] and
+/// must be freed.
+///
+void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion,
+ bool OwnsTokens) {
+ // Save our current state.
+ PushIncludeMacroStack();
+ CurDirLookup = 0;
+
+ // Create a macro expander to expand from the specified token stream.
+ if (NumCachedTokenLexers == 0) {
+ CurTokenLexer.reset(new TokenLexer(Toks, NumToks, DisableMacroExpansion,
+ OwnsTokens, *this));
+ } else {
+ CurTokenLexer.reset(TokenLexerCache[--NumCachedTokenLexers]);
+ CurTokenLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ }
+}
+
+/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
+/// the current file. This either returns the EOF token or pops a level off
+/// the include stack and keeps going.
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
+ assert(!CurTokenLexer &&
+ "Ending a file when currently in a macro!");
+
+ // See if this file had a controlling macro.
+ if (CurPPLexer) { // Not ending a macro, ignore it.
+ if (const IdentifierInfo *ControllingMacro =
+ CurPPLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
+ // Okay, this has a controlling macro, remember in HeaderFileInfo.
+ if (const FileEntry *FE =
+ SourceMgr.getFileEntryForID(CurPPLexer->getFileID()))
+ HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
+ }
+ }
+
+ // If this is a #include'd file, pop it off the include stack and continue
+ // lexing the #includer file.
+ if (!IncludeMacroStack.empty()) {
+ // We're done with the #included file.
+ RemoveTopOfLexerStack();
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !isEndOfMacro && CurPPLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurPPLexer->getSourceLocation());
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
+ PPCallbacks::ExitFile, FileType);
+ }
+
+ // Client should lex another token.
+ return false;
+ }
+
+ // If the file ends with a newline, form the EOF token on the newline itself,
+ // rather than "on the line following it", which doesn't exist. This makes
+ // diagnostics relating to the end of file include the last line that the user
+ // actually typed, which is goodness.
+ if (CurLexer) {
+ const char *EndPos = CurLexer->BufferEnd;
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
+ --EndPos;
+
+ // Handle \n\r and \r\n:
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
+ EndPos[-1] != EndPos[0])
+ --EndPos;
+ }
+
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ // We're done with the #included file.
+ CurLexer.reset();
+ } else {
+ assert(CurPTHLexer && "Got EOF but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ CurPTHLexer.reset();
+ }
+
+ CurPPLexer = 0;
+
+ // This is the end of the top-level file. If the diag::pp_macro_not_used
+ // diagnostic is enabled, look for macros that have not been used.
+ if (getDiagnostics().getDiagnosticLevel(diag::pp_macro_not_used) !=
+ Diagnostic::Ignored) {
+ for (macro_iterator I = macro_begin(), E = macro_end(); I != E; ++I)
+ if (!I->second->isUsed())
+ Diag(I->second->getDefinitionLoc(), diag::pp_macro_not_used);
+ }
+ return true;
+}
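+
+// Illustration (hypothetical header guard): when a file of the form
+//
+//   #ifndef FOO_H
+//   #define FOO_H
+//   ...
+//   #endif
+//
+// hits EOF, GetControllingMacroAtEndOfFile() reports FOO_H and the code above
+// records it in HeaderFileInfo, allowing a later #include of the same file to
+// be skipped entirely when FOO_H is already defined.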
+
+/// HandleEndOfTokenLexer - This callback is invoked when the current TokenLexer
+/// hits the end of its token stream.
+bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Ending a macro when currently in a #include file!");
+
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+
+ // Handle this like a #include file being popped off the stack.
+ return HandleEndOfFile(Result, true);
+}
+
+/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
+/// lexer stack. This should only be used in situations where the current
+/// state of the top-of-stack lexer is unknown.
+void Preprocessor::RemoveTopOfLexerStack() {
+ assert(!IncludeMacroStack.empty() && "Ran out of stack entries to load");
+
+ if (CurTokenLexer) {
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+ }
+
+ PopIncludeMacroStack();
+}
+
+/// HandleMicrosoftCommentPaste - When the macro expander pastes together a
+/// comment (/##/) in Microsoft mode, this method handles updating the current
+/// state, returning the token on the next source line.
+void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Pasted comment can only be formed from macro");
+
+ // We handle this by scanning for the closest real lexer, switching it to
+ // raw mode and preprocessor mode. This will cause it to return \n as an
+ // explicit EOM token.
+ PreprocessorLexer *FoundLexer = 0;
+ bool LexerWasInPPMode = false;
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISI = *(IncludeMacroStack.end()-i-1);
+ if (ISI.ThePPLexer == 0) continue; // Scan for a real lexer.
+
+ // Once we find a real lexer, mark it as raw mode (disabling macro
+ // expansions) and preprocessor mode (return EOM). We know that the lexer
+ // was *not* in raw mode before, because the macro that the comment came
+ // from was expanded. However, it could have already been in preprocessor
+ // mode (#if COMMENT) in which case we have to return it to that mode and
+ // return EOM.
+ FoundLexer = ISI.ThePPLexer;
+ FoundLexer->LexingRawMode = true;
+ LexerWasInPPMode = FoundLexer->ParsingPreprocessorDirective;
+ FoundLexer->ParsingPreprocessorDirective = true;
+ break;
+ }
+
+ // Okay, we either found and switched over the lexer, or we didn't find a
+ // lexer. In either case, finish off the macro the comment came from, getting
+ // the next token.
+ if (!HandleEndOfTokenLexer(Tok)) Lex(Tok);
+
+ // Discard comments as long as we don't have EOF or EOM. This 'comments
+ // out' the rest of the line, including any tokens that came from other macros
+ // that were active, as in:
+ // #define submacro a COMMENT b
+ // submacro c
+ // which should lex to 'a' only: 'b' and 'c' should be removed.
+ while (Tok.isNot(tok::eom) && Tok.isNot(tok::eof))
+ Lex(Tok);
+
+ // If we got an eom token, then we successfully found the end of the line.
+ if (Tok.is(tok::eom)) {
+ assert(FoundLexer && "Can't get end of line without an active lexer");
+ // Restore the lexer back to normal mode instead of raw mode.
+ FoundLexer->LexingRawMode = false;
+
+ // If the lexer was already in preprocessor mode, just return the EOM token
+ // to finish the preprocessor line.
+ if (LexerWasInPPMode) return;
+
+ // Otherwise, switch out of PP mode and return the next lexed token.
+ FoundLexer->ParsingPreprocessorDirective = false;
+ return Lex(Tok);
+ }
+
+ // If we got an EOF token, then we reached the end of the token stream but
+ // didn't find an explicit \n. This can only happen if there was no lexer
+ // active (an active lexer would return EOM at EOF if there was no \n in
+ // preprocessor directive mode), so just return EOF as our token.
+ assert(!FoundLexer && "Lexer should return EOM before EOF in PP mode");
+}
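+
+// Illustration (hypothetical example, Microsoft mode): with
+//
+//   #define COMMENT / ## /
+//   #define submacro a COMMENT b
+//   submacro c
+//
+// the paste forms a line comment, and the handler above discards 'b' and 'c'
+// so that only 'a' is returned, as described in the loop comment above.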
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
new file mode 100644
index 0000000..55222c9
--- /dev/null
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -0,0 +1,605 @@
+//===--- PPMacroExpansion.cpp - Top level Macro Expansion -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top level handling of macro expansion for the
+// preprocessor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include <cstdio>
+#include <ctime>
+using namespace clang;
+
+/// setMacroInfo - Specify a macro for this identifier.
+///
+void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI) {
+ if (MI) {
+ Macros[II] = MI;
+ II->setHasMacroDefinition(true);
+ } else if (II->hasMacroDefinition()) {
+ Macros.erase(II);
+ II->setHasMacroDefinition(false);
+ }
+}
+
+/// RegisterBuiltinMacro - Register the specified identifier in the identifier
+/// table and mark it as a builtin macro to be expanded.
+IdentifierInfo *Preprocessor::RegisterBuiltinMacro(const char *Name) {
+ // Get the identifier.
+ IdentifierInfo *Id = getIdentifierInfo(Name);
+
+ // Mark it as being a macro that is builtin.
+ MacroInfo *MI = AllocateMacroInfo(SourceLocation());
+ MI->setIsBuiltinMacro();
+ setMacroInfo(Id, MI);
+ return Id;
+}
+
+
+/// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
+/// identifier table.
+void Preprocessor::RegisterBuiltinMacros() {
+ Ident__LINE__ = RegisterBuiltinMacro("__LINE__");
+ Ident__FILE__ = RegisterBuiltinMacro("__FILE__");
+ Ident__DATE__ = RegisterBuiltinMacro("__DATE__");
+ Ident__TIME__ = RegisterBuiltinMacro("__TIME__");
+ Ident__COUNTER__ = RegisterBuiltinMacro("__COUNTER__");
+ Ident_Pragma = RegisterBuiltinMacro("_Pragma");
+
+ // GCC Extensions.
+ Ident__BASE_FILE__ = RegisterBuiltinMacro("__BASE_FILE__");
+ Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro("__INCLUDE_LEVEL__");
+ Ident__TIMESTAMP__ = RegisterBuiltinMacro("__TIMESTAMP__");
+}
+
+/// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
+/// in its expansion, currently expands to that token literally.
+static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
+ const IdentifierInfo *MacroIdent,
+ Preprocessor &PP) {
+ IdentifierInfo *II = MI->getReplacementToken(0).getIdentifierInfo();
+
+ // If the token isn't an identifier, it's always literally expanded.
+ if (II == 0) return true;
+
+ // If the identifier is a macro, and if that macro is enabled, it may be
+ // expanded so it's not a trivial expansion.
+ if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled() &&
+ // Fast expanding "#define X X" is ok, because X would be disabled.
+ II != MacroIdent)
+ return false;
+
+ // If this is an object-like macro invocation, it is safe to trivially expand
+ // it.
+ if (MI->isObjectLike()) return true;
+
+ // If this is a function-like macro invocation, it's safe to trivially expand
+ // as long as the identifier is not a macro argument.
+ for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
+ I != E; ++I)
+ if (*I == II)
+ return false; // Identifier is a macro argument.
+
+ return true;
+}
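+
+// Illustration (hypothetical macros): with the test above,
+//
+//   #define VAL   42     // trivial: the replacement is a literal
+//   #define SELF  SELF   // trivial: SELF is disabled while expanding SELF
+//   #define ALIAS VAL    // not trivial: VAL is an enabled macro
+//
+// only the first two qualify for the single-token fast path in
+// HandleMacroExpandedIdentifier below.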
+
+
+/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
+/// lexed is a '('. If so, consume the token and return true, if not, this
+/// method should have no observable side-effect on the lexed tokens.
+bool Preprocessor::isNextPPTokenLParen() {
+ // Do some quick tests for rejection cases.
+ unsigned Val;
+ if (CurLexer)
+ Val = CurLexer->isNextPPTokenLParen();
+ else if (CurPTHLexer)
+ Val = CurPTHLexer->isNextPPTokenLParen();
+ else
+ Val = CurTokenLexer->isNextTokenLParen();
+
+ if (Val == 2) {
+ // We have run off the end. If it's a source file we don't
+ // examine enclosing ones (C99 5.1.1.2p4). Otherwise walk up the
+ // macro stack.
+ if (CurPPLexer)
+ return false;
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ IncludeStackInfo &Entry = IncludeMacroStack[i-1];
+ if (Entry.TheLexer)
+ Val = Entry.TheLexer->isNextPPTokenLParen();
+ else if (Entry.ThePTHLexer)
+ Val = Entry.ThePTHLexer->isNextPPTokenLParen();
+ else
+ Val = Entry.TheTokenLexer->isNextTokenLParen();
+
+ if (Val != 2)
+ break;
+
+ // Ran off the end of a source file?
+ if (Entry.ThePPLexer)
+ return false;
+ }
+ }
+
+ // Okay, if we know that the token is a '(', lex it and return. Otherwise we
+ // have found something that isn't a '(' or we found the end of the
+ // translation unit. In either case, return false.
+ return Val == 1;
+}
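+
+// Illustration (hypothetical example): this check is what makes
+//
+//   #define F(x) x
+//   int F;          // no '(' follows, so F is left alone
+//   int y = F(1);   // '(' follows, so F(1) expands to 1
+//
+// behave per C99 6.10.3p10: a function-like macro name not followed by '(' is
+// not expanded.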
+
+/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
+/// expanded as a macro, handle it and return the next token as 'Identifier'.
+bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
+ MacroInfo *MI) {
+ if (Callbacks) Callbacks->MacroExpands(Identifier, MI);
+
+ // If this is a macro expansion in the "#if !defined(x)" line for the file,
+ // then the macro could expand to different things in other contexts, so we
+ // need to disable the multiple-include optimization in this case.
+ if (CurPPLexer) CurPPLexer->MIOpt.ExpandedMacro();
+
+ // If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
+ if (MI->isBuiltinMacro()) {
+ ExpandBuiltinMacro(Identifier);
+ return false;
+ }
+
+ /// Args - If this is a function-like macro expansion, this contains,
+ /// for each macro argument, the list of tokens that were provided to the
+ /// invocation.
+ MacroArgs *Args = 0;
+
+ // Remember where the end of the instantiation occurred. For an object-like
+ // macro, this is the identifier. For a function-like macro, this is the ')'.
+ SourceLocation InstantiationEnd = Identifier.getLocation();
+
+ // If this is a function-like macro, read the arguments.
+ if (MI->isFunctionLike()) {
+ // C99 6.10.3p10: If the preprocessing token immediately after the macro
+ // name isn't a '(', this macro should not be expanded.
+ if (!isNextPPTokenLParen())
+ return true;
+
+ // Remember that we are now parsing the arguments to a macro invocation.
+ // Preprocessor directives used inside macro arguments are not portable, and
+ // this enables the warning.
+ InMacroArgs = true;
+ Args = ReadFunctionLikeMacroArgs(Identifier, MI, InstantiationEnd);
+
+ // Finished parsing args.
+ InMacroArgs = false;
+
+ // If there was an error parsing the arguments, bail out.
+ if (Args == 0) return false;
+
+ ++NumFnMacroExpanded;
+ } else {
+ ++NumMacroExpanded;
+ }
+
+ // Notice that this macro has been used.
+ MI->setIsUsed(true);
+
+ // If we started lexing a macro, enter the macro expansion body.
+
+ // If this macro expands to no tokens, don't bother to push it onto the
+ // expansion stack, only to take it right back off.
+ if (MI->getNumTokens() == 0) {
+ // No need for arg info.
+ if (Args) Args->destroy();
+
+ // Ignore this macro use, just return the next token in the current
+ // buffer.
+ bool HadLeadingSpace = Identifier.hasLeadingSpace();
+ bool IsAtStartOfLine = Identifier.isAtStartOfLine();
+
+ Lex(Identifier);
+
+ // If the identifier isn't on some OTHER line, inherit the leading
+ // whitespace/first-on-a-line property of this token. This handles
+ // stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
+ // empty.
+ if (!Identifier.isAtStartOfLine()) {
+ if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
+ if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
+ }
+ ++NumFastMacroExpanded;
+ return false;
+
+ } else if (MI->getNumTokens() == 1 &&
+ isTrivialSingleTokenExpansion(MI, Identifier.getIdentifierInfo(),
+ *this)) {
+ // Otherwise, if this macro expands into a single trivially-expanded
+ // token: expand it now. This handles common cases like
+ // "#define VAL 42".
+
+ // No need for arg info.
+ if (Args) Args->destroy();
+
+ // Propagate the isAtStartOfLine/hasLeadingSpace markers of the macro
+ // identifier to the expanded token.
+ bool isAtStartOfLine = Identifier.isAtStartOfLine();
+ bool hasLeadingSpace = Identifier.hasLeadingSpace();
+
+ // Remember where the token is instantiated.
+ SourceLocation InstantiateLoc = Identifier.getLocation();
+
+ // Replace the result token.
+ Identifier = MI->getReplacementToken(0);
+
+ // Restore the StartOfLine/LeadingSpace markers.
+ Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
+ Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
+
+ // Update the token's location to include both its instantiation and physical
+ // locations.
+ SourceLocation Loc =
+ SourceMgr.createInstantiationLoc(Identifier.getLocation(), InstantiateLoc,
+ InstantiationEnd,Identifier.getLength());
+ Identifier.setLocation(Loc);
+
+ // If this is "#define X X", we must mark the result as unexpandable.
+ if (IdentifierInfo *NewII = Identifier.getIdentifierInfo())
+ if (getMacroInfo(NewII) == MI)
+ Identifier.setFlag(Token::DisableExpand);
+
+ // The result token is either not an identifier or cannot be expanded further
+ // (see isTrivialSingleTokenExpansion), so we're done.
+ ++NumFastMacroExpanded;
+ return false;
+ }
+
+ // Start expanding the macro.
+ EnterMacro(Identifier, InstantiationEnd, Args);
+
+ // Now that the macro is at the top of the include stack, ask the
+ // preprocessor to read the next token from it.
+ Lex(Identifier);
+ return false;
+}
+
+/// ReadFunctionLikeMacroArgs - After reading "MACRO" and knowing that the next
+/// token is the '(' of the macro, this method is invoked to read all of the
+/// actual arguments specified for the macro invocation. This returns null on
+/// error.
+MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
+ MacroInfo *MI,
+ SourceLocation &MacroEnd) {
+ // The number of fixed arguments to parse.
+ unsigned NumFixedArgsLeft = MI->getNumArgs();
+ bool isVariadic = MI->isVariadic();
+
+ // Outer loop, while there are more arguments, keep reading them.
+ Token Tok;
+
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+ assert(Tok.is(tok::l_paren) && "Error computing l-paren-ness?");
+
+ // ArgTokens - Build up a list of tokens that make up each argument. Each
+ // argument is separated by an EOF token. Use a SmallVector so we can avoid
+ // heap allocations in the common case.
+ llvm::SmallVector<Token, 64> ArgTokens;
+
+ unsigned NumActuals = 0;
+ while (Tok.isNot(tok::r_paren)) {
+ assert((Tok.is(tok::l_paren) || Tok.is(tok::comma)) &&
+ "only expect argument separators here");
+
+ unsigned ArgTokenStart = ArgTokens.size();
+ SourceLocation ArgStartLoc = Tok.getLocation();
+
+ // C99 6.10.3p11: Keep track of the number of l_parens we have seen. Note
+ // that we already consumed the first one.
+ unsigned NumParens = 0;
+
+ while (1) {
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+
+ if (Tok.is(tok::eof) || Tok.is(tok::eom)) { // "#if f(<eof>" & "#if f(\n"
+ Diag(MacroName, diag::err_unterm_macro_invoc);
+ // Do not lose the EOF/EOM. Return it to the client.
+ MacroName = Tok;
+ return 0;
+ } else if (Tok.is(tok::r_paren)) {
+ // If we found the ) token, the macro arg list is done.
+ if (NumParens-- == 0) {
+ MacroEnd = Tok.getLocation();
+ break;
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ ++NumParens;
+ } else if (Tok.is(tok::comma) && NumParens == 0) {
+ // Comma ends this argument if there are more fixed arguments expected.
+ // However, if this is a variadic macro, and this is part of the
+ // variadic part, then the comma is just an argument token.
+ if (!isVariadic) break;
+ if (NumFixedArgsLeft > 1)
+ break;
+ } else if (Tok.is(tok::comment) && !KeepMacroComments) {
+ // If this is a comment token in the argument list and we're just in
+ // -C mode (not -CC mode), discard the comment.
+ continue;
+ } else if (Tok.getIdentifierInfo() != 0) {
+ // Reading macro arguments can cause macros that we are currently
+ // expanding from to be popped off the expansion stack. Doing so causes
+ // them to be reenabled for expansion. Here we record whether any
+ // identifiers we lex as macro arguments correspond to disabled macros.
+ // If so, we mark the token as noexpand. This is a subtle aspect of
+ // C99 6.10.3.4p2.
+ if (MacroInfo *MI = getMacroInfo(Tok.getIdentifierInfo()))
+ if (!MI->isEnabled())
+ Tok.setFlag(Token::DisableExpand);
+ }
+ ArgTokens.push_back(Tok);
+ }
+
+ // If this was an empty argument list foo(), don't add this as an empty
+ // argument.
+ if (ArgTokens.empty() && Tok.getKind() == tok::r_paren)
+ break;
+
+ // If this is not a variadic macro, and too many args were specified, emit
+ // an error.
+ if (!isVariadic && NumFixedArgsLeft == 0) {
+ if (ArgTokens.size() != ArgTokenStart)
+ ArgStartLoc = ArgTokens[ArgTokenStart].getLocation();
+
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(ArgStartLoc, diag::err_too_many_args_in_macro_invoc);
+ return 0;
+ }
+
+ // Empty arguments are standard in C99 and supported as an extension in
+ // other modes.
+ if (ArgTokens.size() == ArgTokenStart && !Features.C99)
+ Diag(Tok, diag::ext_empty_fnmacro_arg);
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ EOFTok.setLength(0);
+ ArgTokens.push_back(EOFTok);
+ ++NumActuals;
+ assert(NumFixedArgsLeft != 0 && "Too many arguments parsed");
+ --NumFixedArgsLeft;
+ }
+
+ // Okay, we found the r_paren. Check to see if we parsed too few
+ // arguments.
+ unsigned MinArgsExpected = MI->getNumArgs();
+
+ // See MacroArgs instance var for description of this.
+ bool isVarargsElided = false;
+
+ if (NumActuals < MinArgsExpected) {
+ // There are several cases where too few arguments is ok, handle them now.
+ if (NumActuals == 0 && MinArgsExpected == 1) {
+ // #define A(X) or #define A(...) ---> A()
+
+ // If there is exactly one argument, and that argument is missing,
+ // then we have an empty "()" argument list. This is fine, even if
+ // the macro expects one argument (the argument is just empty).
+ isVarargsElided = MI->isVariadic();
+ } else if (MI->isVariadic() &&
+ (NumActuals+1 == MinArgsExpected || // A(x, ...) -> A(X)
+ (NumActuals == 0 && MinArgsExpected == 2))) {// A(x,...) -> A()
+ // Varargs where the named vararg parameter is missing: ok as extension.
+ // #define A(x, ...)
+ // A("blah")
+ Diag(Tok, diag::ext_missing_varargs_arg);
+
+ // Remember this occurred, allowing us to elide the comma when used for
+ // cases like:
+ // #define A(x, foo...) blah(a, ## foo)
+ // #define B(x, ...) blah(a, ## __VA_ARGS__)
+ // #define C(...) blah(a, ## __VA_ARGS__)
+ // A(x) B(x) C()
+ isVarargsElided = true;
+ } else {
+ // Otherwise, emit the error.
+ Diag(Tok, diag::err_too_few_args_in_macro_invoc);
+ return 0;
+ }
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ SourceLocation EndLoc = Tok.getLocation();
+ Tok.startToken();
+ Tok.setKind(tok::eof);
+ Tok.setLocation(EndLoc);
+ Tok.setLength(0);
+ ArgTokens.push_back(Tok);
+
+ // If we expect two arguments, add both as empty.
+ if (NumActuals == 0 && MinArgsExpected == 2)
+ ArgTokens.push_back(Tok);
+
+ } else if (NumActuals > MinArgsExpected && !MI->isVariadic()) {
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(MacroName, diag::err_too_many_args_in_macro_invoc);
+ return 0;
+ }
+
+ return MacroArgs::create(MI, ArgTokens.data(), ArgTokens.size(),
+ isVarargsElided);
+}
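+
+// Illustration (hypothetical macro): with the rules above,
+//
+//   #define A(x, ...) f(x, ## __VA_ARGS__)
+//   A(1)        // named variadic argument missing: warned as an extension,
+//               // and isVarargsElided lets the ', ##' drop the comma
+//   A(1, 2, 3)  // the variadic part "2, 3" is collected as one argument
+//
+// both invocations are accepted by the argument reader above.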
+
+/// ComputeDATE_TIME - Compute the current time, enter it into the specified
+/// scratch buffer, then return DATELoc/TIMELoc locations with the position of
+/// the identifier tokens inserted.
+static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
+ Preprocessor &PP) {
+ time_t TT = time(0);
+ struct tm *TM = localtime(&TT);
+
+ static const char * const Months[] = {
+ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
+ };
+
+ char TmpBuffer[100];
+ sprintf(TmpBuffer, "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
+ TM->tm_year+1900);
+
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
+ DATELoc = TmpTok.getLocation();
+
+ sprintf(TmpBuffer, "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
+ PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
+ TIMELoc = TmpTok.getLocation();
+}
+
+/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
+/// as a builtin macro, handle it and return the next token as 'Tok'.
+void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
+ // Figure out which token this is.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ assert(II && "Can't be a macro without id info!");
+
+ // If this is an _Pragma directive, expand it, invoke the pragma handler, then
+ // lex the token after it.
+ if (II == Ident_Pragma)
+ return Handle_Pragma(Tok);
+
+ ++NumBuiltinMacroExpanded;
+
+ char TmpBuffer[100];
+
+ // Set up the return result.
+ Tok.setIdentifierInfo(0);
+ Tok.clearFlag(Token::NeedsCleaning);
+
+ if (II == Ident__LINE__) {
+ // C99 6.10.8: "__LINE__: The presumed line number (within the current
+ // source file) of the current source line (an integer constant)". This can
+ // be affected by #line.
+ SourceLocation Loc = Tok.getLocation();
+
+ // Advance to the location of the first _, this might not be the first byte
+ // of the token if it starts with an escaped newline.
+ Loc = AdvanceToTokenCharacter(Loc, 0);
+
+ // One wrinkle here is that GCC expands __LINE__ to the location of the *end* of
+ // a macro instantiation. This doesn't matter for object-like macros, but
+ // can matter for a function-like macro that expands to contain __LINE__.
+ // Skip down through instantiation points until we find a file loc for the
+ // end of the instantiation history.
+ Loc = SourceMgr.getInstantiationRange(Loc).second;
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
+
+ // __LINE__ expands to a simple numeric value.
+ sprintf(TmpBuffer, "%u", PLoc.getLine());
+ Tok.setKind(tok::numeric_constant);
+ CreateString(TmpBuffer, strlen(TmpBuffer), Tok, Tok.getLocation());
+ } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
+ // C99 6.10.8: "__FILE__: The presumed name of the current source file (a
+ // character string literal)". This can be affected by #line.
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+
+ // __BASE_FILE__ is a GNU extension that returns the top of the presumed
+ // #include stack instead of the current file.
+ if (II == Ident__BASE_FILE__) {
+ Diag(Tok, diag::ext_pp_base_file);
+ SourceLocation NextLoc = PLoc.getIncludeLoc();
+ while (NextLoc.isValid()) {
+ PLoc = SourceMgr.getPresumedLoc(NextLoc);
+ NextLoc = PLoc.getIncludeLoc();
+ }
+ }
+
+ // Escape this filename. Turn '\' -> '\\' '"' -> '\"'
+ std::string FN = PLoc.getFilename();
+ FN = '"' + Lexer::Stringify(FN) + '"';
+ Tok.setKind(tok::string_literal);
+ CreateString(&FN[0], FN.size(), Tok, Tok.getLocation());
+ } else if (II == Ident__DATE__) {
+ if (!DATELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"Mmm dd yyyy\""));
+ Tok.setLocation(SourceMgr.createInstantiationLoc(DATELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ } else if (II == Ident__TIME__) {
+ if (!TIMELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"hh:mm:ss\""));
+ Tok.setLocation(SourceMgr.createInstantiationLoc(TIMELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ } else if (II == Ident__INCLUDE_LEVEL__) {
+ Diag(Tok, diag::ext_pp_include_level);
+
+ // Compute the presumed include depth of this token. This can be affected
+ // by GNU line markers.
+ unsigned Depth = 0;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ for (; PLoc.isValid(); ++Depth)
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+
+ // __INCLUDE_LEVEL__ expands to a simple numeric value.
+ sprintf(TmpBuffer, "%u", Depth);
+ Tok.setKind(tok::numeric_constant);
+ CreateString(TmpBuffer, strlen(TmpBuffer), Tok, Tok.getLocation());
+ } else if (II == Ident__TIMESTAMP__) {
+ // MSVC, ICC, GCC, VisualAge C++ extension. The generated string should be
+ // of the form "Ddd Mmm dd hh::mm::ss yyyy", which is returned by asctime.
+ Diag(Tok, diag::ext_pp_timestamp);
+
+ // Get the file that we are lexing out of. If we're currently lexing from
+ // a macro, dig into the include stack.
+ const FileEntry *CurFile = 0;
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ if (TheLexer)
+ CurFile = SourceMgr.getFileEntryForID(TheLexer->getFileID());
+
+ // Build the timestamp string from the file's modification time; if the
+ // file is unknown, use a placeholder instead.
+ const char *Result;
+ if (CurFile) {
+ time_t TT = CurFile->getModificationTime();
+ struct tm *TM = localtime(&TT);
+ Result = asctime(TM);
+ } else {
+ Result = "??? ??? ?? ??:??:?? ????\n";
+ }
+ TmpBuffer[0] = '"';
+ strcpy(TmpBuffer+1, Result);
+ unsigned Len = strlen(TmpBuffer);
+ TmpBuffer[Len] = '"'; // Replace the newline with a quote.
+ Tok.setKind(tok::string_literal);
+ CreateString(TmpBuffer, Len, Tok, Tok.getLocation());
+ } else if (II == Ident__COUNTER__) {
+ Diag(Tok, diag::ext_pp_counter);
+
+ // __COUNTER__ expands to a simple numeric value.
+ sprintf(TmpBuffer, "%u", CounterValue++);
+ Tok.setKind(tok::numeric_constant);
+ CreateString(TmpBuffer, strlen(TmpBuffer), Tok, Tok.getLocation());
+ } else {
+ assert(0 && "Unknown identifier!");
+ }
+}
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
new file mode 100644
index 0000000..916bdef
--- /dev/null
+++ b/lib/Lex/PTHLexer.cpp
@@ -0,0 +1,701 @@
+//===--- PTHLexer.cpp - Lex from a token stream ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PTHLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Token.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <sys/stat.h>
+using namespace clang;
+using namespace clang::io;
+
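+// Each on-disk token record is DISK_TOKEN_SIZE bytes: a 1-byte token kind, a
+// 1-byte flag set and a 2-byte length packed into one 32-bit word, followed
+// by a 32-bit identifier/spelling index and a 32-bit file offset.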
+#define DISK_TOKEN_SIZE (1+1+2+4+4)
+
+//===----------------------------------------------------------------------===//
+// PTHLexer methods.
+//===----------------------------------------------------------------------===//
+
+PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
+ const unsigned char *ppcond, PTHManager &PM)
+ : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), LastHashTokPtr(0),
+ PPCond(ppcond), CurPPCondPtr(ppcond), PTHMgr(PM) {
+
+ FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
+}
+
+void PTHLexer::Lex(Token& Tok) {
+LexNextToken:
+
+ //===--------------------------------------==//
+ // Read the raw token data.
+ //===--------------------------------------==//
+
+ // Shadow CurPtr into an automatic variable.
+ const unsigned char *CurPtrShadow = CurPtr;
+
+ // Read in the data for the token.
+ unsigned Word0 = ReadLE32(CurPtrShadow);
+ uint32_t IdentifierID = ReadLE32(CurPtrShadow);
+ uint32_t FileOffset = ReadLE32(CurPtrShadow);
+
+ tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
+ Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
+ uint32_t Len = Word0 >> 16;
+
+ CurPtr = CurPtrShadow;
+
+ //===--------------------------------------==//
+ // Construct the token itself.
+ //===--------------------------------------==//
+
+ Tok.startToken();
+ Tok.setKind(TKind);
+ Tok.setFlag(TFlags);
+ assert(!LexingRawMode);
+ Tok.setLocation(FileStartLoc.getFileLocWithOffset(FileOffset));
+ Tok.setLength(Len);
+
+ // Handle literals and identifiers.
+ if (Tok.isLiteral()) {
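+ // For literals, the identifier field is reused as an offset into the PTH
+ // spelling cache, giving direct access to the literal's spelling.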
+ Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
+ }
+ else if (IdentifierID) {
+ MIOpt.ReadToken();
+ IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
+
+ Tok.setIdentifierInfo(II);
+
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ Tok.setKind(II->getTokenID());
+
+ if (II->isHandleIdentifierCase())
+ PP->HandleIdentifier(Tok);
+ return;
+ }
+
+ //===--------------------------------------==//
+ // Process the token.
+ //===--------------------------------------==//
+#if 0
+ SourceManager& SM = PP->getSourceManager();
+ llvm::cerr << SM.getFileEntryForID(getFileID())->getName()
+ << ':' << SM.getLogicalLineNumber(Tok.getLocation())
+ << ':' << SM.getLogicalColumnNumber(Tok.getLocation())
+ << '\n';
+#endif
+
+ if (TKind == tok::eof) {
+ // Save the end-of-file token.
+ EofToken = Tok;
+
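+ // HandleEndOfFile may pop (and delete) this lexer, so cache the
+ // Preprocessor pointer before calling into it.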
+ Preprocessor *PPCache = PP;
+
+ assert(!ParsingPreprocessorDirective);
+ assert(!LexingRawMode);
+
+ // FIXME: Issue diagnostics similar to Lexer.
+ if (PP->HandleEndOfFile(Tok, false))
+ return;
+
+ assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
+ return PPCache->Lex(Tok);
+ }
+
+ if (TKind == tok::hash && Tok.isAtStartOfLine()) {
+ LastHashTokPtr = CurPtr - DISK_TOKEN_SIZE;
+ assert(!LexingRawMode);
+ PP->HandleDirective(Tok);
+
+ if (PP->isCurrentLexer(this))
+ goto LexNextToken;
+
+ return PP->Lex(Tok);
+ }
+
+ if (TKind == tok::eom) {
+ assert(ParsingPreprocessorDirective);
+ ParsingPreprocessorDirective = false;
+ return;
+ }
+
+ MIOpt.ReadToken();
+}
+
+// FIXME: We can just grab the last token instead of storing a copy
+// into EofToken.
+void PTHLexer::getEOF(Token& Tok) {
+ assert(EofToken.is(tok::eof));
+ Tok = EofToken;
+}
+
+void PTHLexer::DiscardToEndOfLine() {
+ assert(ParsingPreprocessorDirective && ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+
+ // We assume that if the preprocessor wishes to discard to the end of
+ // the line that it also means to end the current preprocessor directive.
+ ParsingPreprocessorDirective = false;
+
+ // Skip tokens by only peeking at their token kind and the flags.
+ // We don't need to actually reconstruct full tokens from the token buffer.
+ // This saves some copies and it also reduces IdentifierInfo* lookup.
+ const unsigned char* p = CurPtr;
+ while (1) {
+ // Read the token kind. Are we at the end of the file?
+ tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
+ if (x == tok::eof) break;
+
+ // Read the token flags. Are we at the start of the next line?
+ Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
+ if (y & Token::StartOfLine) break;
+
+ // Skip to the next token.
+ p += DISK_TOKEN_SIZE;
+ }
+
+ CurPtr = p;
+}
+
+/// SkipBlock - Used by Preprocessor to skip the current conditional block.
+bool PTHLexer::SkipBlock() {
+ assert(CurPPCondPtr && "No cached PP conditional information.");
+ assert(LastHashTokPtr && "No known '#' token.");
+
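+ // Each side-table entry is two 32-bit words: the offset of a '#' token in
+ // the token stream and the index of the entry to jump to when skipping
+ // (an index of 0 marks a #endif).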
+ const unsigned char* HashEntryI = 0;
+ uint32_t Offset;
+ uint32_t TableIdx;
+
+ do {
+ // Read the token offset from the side-table.
+ Offset = ReadLE32(CurPPCondPtr);
+
+ // Read the target table index from the side-table.
+ TableIdx = ReadLE32(CurPPCondPtr);
+
+ // Compute the actual memory address of the '#' token data for this entry.
+ HashEntryI = TokBuf + Offset;
+
+ // Optimization: "Sibling jumping". #if...#else...#endif blocks can
+ // contain nested blocks. In the side-table we can jump over these
+ // nested blocks instead of doing a linear search if the next "sibling"
+ // entry is not at a location greater than LastHashTokPtr.
+ if (HashEntryI < LastHashTokPtr && TableIdx) {
+ // In the side-table we are still at an entry for a '#' token that
+ // is earlier than the last one we saw. Check if the location we would
+ // stride gets us closer.
+ const unsigned char* NextPPCondPtr =
+ PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ // Read where we should jump to.
+ uint32_t TmpOffset = ReadLE32(NextPPCondPtr);
+ const unsigned char* HashEntryJ = TokBuf + TmpOffset;
+
+ if (HashEntryJ <= LastHashTokPtr) {
+ // Jump directly to the next entry in the side table.
+ HashEntryI = HashEntryJ;
+ Offset = TmpOffset;
+ TableIdx = ReadLE32(NextPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+ }
+ }
+ }
+ while (HashEntryI < LastHashTokPtr);
+ assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
+ assert(TableIdx && "No jumping from #endifs.");
+
+ // Update our side-table iterator.
+ const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+
+ // Read where we should jump to.
+ HashEntryI = TokBuf + ReadLE32(NextPPCondPtr);
+ uint32_t NextIdx = ReadLE32(NextPPCondPtr);
+
+ // By construction NextIdx will be zero if this is a #endif. Knowing this
+ // lets us avoid lexing another token.
+ bool isEndif = NextIdx == 0;
+
+ // This case can occur when we see something like this:
+ //
+ // #if ...
+ // /* a comment or nothing */
+ // #elif
+ //
+ // If we are skipping the first #if block, CurPtr will already point at the
+ // 'elif'. Just return.
+
+ if (CurPtr > HashEntryI) {
+ assert(CurPtr == HashEntryI + DISK_TOKEN_SIZE);
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif)
+ CurPtr += DISK_TOKEN_SIZE*2;
+ else
+ LastHashTokPtr = HashEntryI;
+
+ return isEndif;
+ }
+
+ // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
+ CurPtr = HashEntryI;
+
+ // Update the location of the last observed '#'. This is useful if we
+ // are skipping multiple blocks.
+ LastHashTokPtr = CurPtr;
+
+ // Skip the '#' token.
+ assert(((tok::TokenKind)*CurPtr) == tok::hash);
+ CurPtr += DISK_TOKEN_SIZE;
+
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif) { CurPtr += DISK_TOKEN_SIZE*2; }
+
+ return isEndif;
+}
+
+SourceLocation PTHLexer::getSourceLocation() {
+ // getSourceLocation is not on the hot path. It is used to get the location
+ // of the next token when transitioning back to this lexer after handling
+ // a #included file. Just read the necessary data from the token
+ // data buffer to construct the SourceLocation object.
+ // NOTE: This is a virtual function; hence it is defined out-of-line.
+ const unsigned char *OffsetPtr = CurPtr + (DISK_TOKEN_SIZE - 4);
+ uint32_t Offset = ReadLE32(OffsetPtr);
+ return FileStartLoc.getFileLocWithOffset(Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// PTH file lookup: map from strings to file data.
+//===----------------------------------------------------------------------===//
+
+/// PTHFileLookup - This internal data structure is used by the PTHManager
+/// to map from FileEntry objects managed by FileManager to offsets within
+/// the PTH file.
+namespace {
+class VISIBILITY_HIDDEN PTHFileData {
+ const uint32_t TokenOff;
+ const uint32_t PPCondOff;
+public:
+ PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
+ : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
+
+ uint32_t getTokenOffset() const { return TokenOff; }
+ uint32_t getPPCondOffset() const { return PPCondOff; }
+};
+
+
+class VISIBILITY_HIDDEN PTHFileLookupCommonTrait {
+public:
+ typedef std::pair<unsigned char, const char*> internal_key_type;
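+ // Keys consist of an entry-kind byte followed by the path string; file
+ // entries use kind 0x1.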
+
+ static unsigned ComputeHash(internal_key_type x) {
+ return BernsteinHash(x.second);
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ unsigned keyLen = (unsigned) ReadUnalignedLE16(d);
+ unsigned dataLen = (unsigned) *(d++);
+ return std::make_pair(keyLen, dataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char* d, unsigned) {
+ unsigned char k = *(d++); // Read the entry kind.
+ return std::make_pair(k, (const char*) d);
+ }
+};
+
+class VISIBILITY_HIDDEN PTHFileLookupTrait : public PTHFileLookupCommonTrait {
+public:
+ typedef const FileEntry* external_key_type;
+ typedef PTHFileData data_type;
+
+ static internal_key_type GetInternalKey(const FileEntry* FE) {
+ return std::make_pair((unsigned char) 0x1, FE->getName());
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ return a.first == b.first && strcmp(a.second, b.second) == 0;
+ }
+
+ static PTHFileData ReadData(const internal_key_type& k,
+ const unsigned char* d, unsigned) {
+ assert(k.first == 0x1 && "Only file lookups can match!");
+ uint32_t x = ::ReadUnalignedLE32(d);
+ uint32_t y = ::ReadUnalignedLE32(d);
+ return PTHFileData(x, y);
+ }
+};
+
+class VISIBILITY_HIDDEN PTHStringLookupTrait {
+public:
+ typedef uint32_t
+ data_type;
+
+ typedef const std::pair<const char*, unsigned>
+ external_key_type;
+
+ typedef external_key_type internal_key_type;
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
+ : false;
+ }
+
+ static unsigned ComputeHash(const internal_key_type& a) {
+ return BernsteinHash(a.first, a.second);
+ }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ return std::make_pair((unsigned) ReadUnalignedLE16(d), sizeof(uint32_t));
+ }
+
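+ // Keys are stored NUL-terminated; the NUL is excluded from the returned
+ // key length.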
+ static std::pair<const char*, unsigned>
+ ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return std::make_pair((const char*) d, n-1);
+ }
+
+ static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+ return ::ReadUnalignedLE32(d);
+ }
+};
+
+} // end anonymous namespace
+
+typedef OnDiskChainedHashTable<PTHFileLookupTrait> PTHFileLookup;
+typedef OnDiskChainedHashTable<PTHStringLookupTrait> PTHStringIdLookup;
+
+//===----------------------------------------------------------------------===//
+// PTHManager methods.
+//===----------------------------------------------------------------------===//
+
+PTHManager::PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
+ const unsigned char* idDataTable,
+ IdentifierInfo** perIDCache,
+ void* stringIdLookup, unsigned numIds,
+ const unsigned char* spellingBase,
+ const char* originalSourceFile)
+: Buf(buf), PerIDCache(perIDCache), FileLookup(fileLookup),
+ IdDataTable(idDataTable), StringIdLookup(stringIdLookup),
+ NumIds(numIds), PP(0), SpellingBase(spellingBase),
+ OriginalSourceFile(originalSourceFile) {}
+
+PTHManager::~PTHManager() {
+ delete Buf;
+ delete (PTHFileLookup*) FileLookup;
+ delete (PTHStringIdLookup*) StringIdLookup;
+ free(PerIDCache);
+}
+
+static void InvalidPTH(Diagnostic *Diags, Diagnostic::Level level,
+ const char* Msg = 0) {
+ if (!Diags) return;
+ if (!Msg) Msg = "Invalid or corrupted PTH file";
+ unsigned DiagID = Diags->getCustomDiagID(level, Msg);
+ Diags->Report(FullSourceLoc(), DiagID);
+}
+
+PTHManager* PTHManager::Create(const std::string& file, Diagnostic* Diags,
+ Diagnostic::Level level) {
+ // Memory map the PTH file.
+ llvm::OwningPtr<llvm::MemoryBuffer>
+ File(llvm::MemoryBuffer::getFile(file.c_str()));
+
+ if (!File) {
+ if (Diags) {
+ unsigned DiagID = Diags->getCustomDiagID(level,
+ "PTH file %0 could not be read");
+ Diags->Report(FullSourceLoc(), DiagID) << file;
+ }
+
+ return 0;
+ }
+
+ // Get the buffer ranges and check that the file is large enough to contain
+ // the prologue.
+ const unsigned char* BufBeg = (unsigned char*)File->getBufferStart();
+ const unsigned char* BufEnd = (unsigned char*)File->getBufferEnd();
+
+ // Check the prologue of the file.
+ if ((BufEnd - BufBeg) < (signed) (sizeof("cfe-pth") + 3 + 4) ||
+ memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth") - 1) != 0) {
+ InvalidPTH(Diags, level);
+ return 0;
+ }
+
+ // Read the PTH version.
+ const unsigned char *p = BufBeg + (sizeof("cfe-pth") - 1);
+ unsigned Version = ReadLE32(p);
+
+ if (Version != PTHManager::Version) {
+ InvalidPTH(Diags, level,
+ Version < PTHManager::Version
+ ? "PTH file uses an older PTH format that is no longer supported"
+ : "PTH file uses a newer PTH format that cannot be read");
+ return 0;
+ }
+
+ // The prologue, a table of 32-bit offsets into the file, immediately
+ // follows the version number.
+ const unsigned char *PrologueOffset = p;
+
+ if (PrologueOffset >= BufEnd) {
+ InvalidPTH(Diags, level);
+ return 0;
+ }
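+
+ // The prologue entries used below are, in order: [0] the identifier data
+ // table, [1] the string -> persistent-ID hashtable, [2] the file lookup
+ // table, [3] the spelling cache, and [4] the original source file name.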
+
+ // Construct the file lookup table. This will be used for mapping from
+ // FileEntry*'s to cached tokens.
+ const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
+ const unsigned char* FileTable = BufBeg + ReadLE32(FileTableOffset);
+
+ if (!(FileTable > BufBeg && FileTable < BufEnd)) {
+ InvalidPTH(Diags, level);
+ return 0; // FIXME: Proper error diagnostic?
+ }
+
+ llvm::OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
+
+ // Warn if the PTH file is empty. We still want to create a PTHManager
+ // as the PTH could be used with -include-pth.
+ if (FL->isEmpty())
+ InvalidPTH(Diags, level, "PTH file contains no cached source data");
+
+ // Get the location of the table mapping from persistent ids to the
+ // data needed to reconstruct identifiers.
+ const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
+ const unsigned char* IData = BufBeg + ReadLE32(IDTableOffset);
+
+ if (!(IData >= BufBeg && IData < BufEnd)) {
+ InvalidPTH(Diags, level);
+ return 0;
+ }
+
+ // Get the location of the hashtable mapping between strings and
+ // persistent IDs.
+ const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
+ const unsigned char* StringIdTable = BufBeg + ReadLE32(StringIdTableOffset);
+ if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
+ InvalidPTH(Diags, level);
+ return 0;
+ }
+
+ llvm::OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
+ BufBeg));
+
+ // Get the location of the spelling cache.
+ const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
+ const unsigned char* spellingBase = BufBeg + ReadLE32(spellingBaseOffset);
+ if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
+ InvalidPTH(Diags, level);
+ return 0;
+ }
+
+ // Get the number of IdentifierInfos and pre-allocate the identifier cache.
+ uint32_t NumIds = ReadLE32(IData);
+
+ // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
+ // so that, in the best case, memory is only zeroed once, when the OS returns
+ // us new pages.
+ IdentifierInfo** PerIDCache = 0;
+
+ if (NumIds) {
+ PerIDCache = (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));
+ if (!PerIDCache) {
+ InvalidPTH(Diags, level,
+ "Could not allocate memory for processing PTH file");
+ return 0;
+ }
+ }
+
+ // Compute the address of the original source file.
+ const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
+ unsigned len = ReadUnalignedLE16(originalSourceBase);
+ if (!len) originalSourceBase = 0;
+
+ // Create the new PTHManager.
+ return new PTHManager(File.take(), FL.take(), IData, PerIDCache,
+ SL.take(), NumIds, spellingBase,
+ (const char*) originalSourceBase);
+}
+
+IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
+ // Look in the PTH file for the string data for the IdentifierInfo object.
+ const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
+ const unsigned char* IDData =
+ (const unsigned char*)Buf->getBufferStart() + ReadLE32(TableEntry);
+ assert(IDData < (const unsigned char*)Buf->getBufferEnd());
+
+ // Allocate the object.
+ std::pair<IdentifierInfo,const unsigned char*> *Mem =
+ Alloc.Allocate<std::pair<IdentifierInfo,const unsigned char*> >();
+
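+ // The string data pointer is stored in the second half of the pair,
+ // immediately after the IdentifierInfo; this is presumably where getName()
+ // finds it for identifiers created this way.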
+ Mem->second = IDData;
+ assert(IDData[0] != '\0');
+ IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
+
+ // Store the new IdentifierInfo in the cache.
+ PerIDCache[PersistentID] = II;
+ assert(II->getName() && II->getName()[0] != '\0');
+ return II;
+}
+
+IdentifierInfo* PTHManager::get(const char *NameStart, const char *NameEnd) {
+ PTHStringIdLookup& SL = *((PTHStringIdLookup*)StringIdLookup);
+ // Double check our assumption that the last character isn't '\0'.
+ assert(NameEnd==NameStart || NameStart[NameEnd-NameStart-1] != '\0');
+ PTHStringIdLookup::iterator I = SL.find(std::make_pair(NameStart,
+ NameEnd - NameStart));
+ if (I == SL.end()) // No identifier found?
+ return 0;
+
+ // Match found. Return the identifier!
+ assert(*I > 0);
+ return GetIdentifierInfo(*I-1);
+}
+
+PTHLexer *PTHManager::CreateLexer(FileID FID) {
+ const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
+ if (!FE)
+ return 0;
+
+ // Lookup the FileEntry object in our file lookup data structure. It will
+ // return a variant that indicates whether or not there is an offset within
+ // the PTH file that contains cached tokens.
+ PTHFileLookup& PFL = *((PTHFileLookup*)FileLookup);
+ PTHFileLookup::iterator I = PFL.find(FE);
+
+ if (I == PFL.end()) // No tokens available?
+ return 0;
+
+ const PTHFileData& FileData = *I;
+
+ const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
+ // Compute the offset of the token data within the buffer.
+ const unsigned char* data = BufStart + FileData.getTokenOffset();
+
+ // Get the location of pp-conditional table.
+ const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
+ uint32_t Len = ReadLE32(ppcond);
+ if (Len == 0) ppcond = 0;
+
+ assert(PP && "No preprocessor set yet!");
+ return new PTHLexer(*PP, FID, data, ppcond, *this);
+}
+
+//===----------------------------------------------------------------------===//
+// 'stat' caching.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VISIBILITY_HIDDEN PTHStatData {
+public:
+ const bool hasStat;
+ const ino_t ino;
+ const dev_t dev;
+ const mode_t mode;
+ const time_t mtime;
+ const off_t size;
+
+ PTHStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
+ : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
+
+ PTHStatData()
+ : hasStat(false), ino(0), dev(0), mode(0), mtime(0), size(0) {}
+};
+
+class VISIBILITY_HIDDEN PTHStatLookupTrait : public PTHFileLookupCommonTrait {
+public:
+ typedef const char* external_key_type; // const char*
+ typedef PTHStatData data_type;
+
+ static internal_key_type GetInternalKey(const char *path) {
+ // The key 'kind' doesn't matter here because it is ignored in EqualKey.
+ return std::make_pair((unsigned char) 0x0, path);
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
+ // just the paths.
+ return strcmp(a.second, b.second) == 0;
+ }
+
+ static data_type ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+
+ if (k.first /* File or Directory */) {
+ if (k.first == 0x1 /* File */) d += 4 * 2; // Skip the first 2 words.
+ ino_t ino = (ino_t) ReadUnalignedLE32(d);
+ dev_t dev = (dev_t) ReadUnalignedLE32(d);
+ mode_t mode = (mode_t) ReadUnalignedLE16(d);
+ time_t mtime = (time_t) ReadUnalignedLE64(d);
+ return data_type(ino, dev, mode, mtime, (off_t) ReadUnalignedLE64(d));
+ }
+
+ // Negative stat. Don't read anything.
+ return data_type();
+ }
+};
+
+class VISIBILITY_HIDDEN PTHStatCache : public StatSysCallCache {
+ typedef OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
+ CacheTy Cache;
+
+public:
+ PTHStatCache(PTHFileLookup &FL) :
+ Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
+ FL.getBase()) {}
+
+ ~PTHStatCache() {}
+
+ int stat(const char *path, struct stat *buf) {
+ // Do the lookup for the file's data in the PTH file.
+ CacheTy::iterator I = Cache.find(path);
+
+ // If we don't get a hit in the PTH file just forward to 'stat'.
+ if (I == Cache.end()) return ::stat(path, buf);
+
+ const PTHStatData& Data = *I;
+
+ if (!Data.hasStat)
+ return 1;
+
+ buf->st_ino = Data.ino;
+ buf->st_dev = Data.dev;
+ buf->st_mtime = Data.mtime;
+ buf->st_mode = Data.mode;
+ buf->st_size = Data.size;
+ return 0;
+ }
+};
+} // end anonymous namespace
+
+StatSysCallCache *PTHManager::createStatCache() {
+ return new PTHStatCache(*((PTHFileLookup*) FileLookup));
+}
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
new file mode 100644
index 0000000..ce59341
--- /dev/null
+++ b/lib/Lex/Pragma.cpp
@@ -0,0 +1,699 @@
+//===--- Pragma.cpp - Pragma registration and handling --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PragmaHandler/PragmaTable interfaces and implements
+// pragma related methods of the Preprocessor class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+// Out-of-line destructor to provide a home for the class.
+PragmaHandler::~PragmaHandler() {
+}
+
+//===----------------------------------------------------------------------===//
+// PragmaNamespace Implementation.
+//===----------------------------------------------------------------------===//
+
+
+PragmaNamespace::~PragmaNamespace() {
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i)
+ delete Handlers[i];
+}
+
+/// FindHandler - Check to see if there is already a handler for the
+/// specified name. If not, return the handler for the null identifier if it
+/// exists, otherwise return null. If IgnoreNull is true (the default) then
+/// the null handler isn't returned on failure to match.
+PragmaHandler *PragmaNamespace::FindHandler(const IdentifierInfo *Name,
+ bool IgnoreNull) const {
+ PragmaHandler *NullHandler = 0;
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ if (Handlers[i]->getName() == Name)
+ return Handlers[i];
+
+ if (Handlers[i]->getName() == 0)
+ NullHandler = Handlers[i];
+ }
+ return IgnoreNull ? 0 : NullHandler;
+}
+
+void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
+ for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+ if (Handlers[i] == Handler) {
+ Handlers[i] = Handlers.back();
+ Handlers.pop_back();
+ return;
+ }
+ }
+ assert(0 && "Handler not registered in this namespace");
+}
+
+void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
+ // Read the 'namespace' that the directive is in, e.g. STDC. Do not
+ // macro-expand it; the user could have a STDC #define, and that should not
+ // affect this.
+ PP.LexUnexpandedToken(Tok);
+
+ // Get the handler for this token. If there is no handler, ignore the pragma.
+ PragmaHandler *Handler = FindHandler(Tok.getIdentifierInfo(), false);
+ if (Handler == 0) {
+ PP.Diag(Tok, diag::warn_pragma_ignored);
+ return;
+ }
+
+ // Otherwise, pass it down.
+ Handler->HandlePragma(PP, Tok);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Pragma Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandlePragmaDirective - The "#pragma" directive has been parsed. Lex the
+/// rest of the pragma, passing it to the registered pragma handlers.
+void Preprocessor::HandlePragmaDirective() {
+ ++NumPragma;
+
+ // Invoke the first level of pragma handlers which reads the namespace id.
+ Token Tok;
+ PragmaHandlers->HandlePragma(*this, Tok);
+
+ // If the pragma handler didn't read the rest of the line, consume it now.
+ if (CurPPLexer->ParsingPreprocessorDirective)
+ DiscardUntilEndOfDirective();
+}
+
+/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
+/// return the first token after the directive. The _Pragma token has just
+/// been read into 'Tok'.
+void Preprocessor::Handle_Pragma(Token &Tok) {
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ // Read the '"..."'.
+ Lex(Tok);
+ if (Tok.isNot(tok::string_literal) && Tok.isNot(tok::wide_string_literal)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ // Remember the string.
+ std::string StrVal = getSpelling(Tok);
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ SourceLocation RParenLoc = Tok.getLocation();
+
+ // The _Pragma is lexically sound. Destringize according to C99 6.10.9.1:
+ // "The string literal is destringized by deleting the L prefix, if present,
+ // deleting the leading and trailing double-quotes, replacing each escape
+ // sequence \" by a double-quote, and replacing each escape sequence \\ by a
+ // single backslash."
+ if (StrVal[0] == 'L') // Remove L prefix.
+ StrVal.erase(StrVal.begin());
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Remove the front quote, replacing it with a space, so that the pragma
+ // contents appear to have a space before them.
+ StrVal[0] = ' ';
+
+ // Replace the terminating quote with a \n.
+ StrVal[StrVal.size()-1] = '\n';
+
+ // Remove escaped quotes and escapes.
+ for (unsigned i = 0, e = StrVal.size(); i != e-1; ++i) {
+ if (StrVal[i] == '\\' &&
+ (StrVal[i+1] == '\\' || StrVal[i+1] == '"')) {
+ // \\ -> '\' and \" -> '"'.
+ StrVal.erase(StrVal.begin()+i);
+ --e;
+ }
+ }
+
+ // Plop the string (including the newline and trailing null) into a buffer
+ // where we can lex it.
+ Token TmpTok;
+ TmpTok.startToken();
+ CreateString(&StrVal[0], StrVal.size(), TmpTok);
+ SourceLocation TokLoc = TmpTok.getLocation();
+
+ // Make and enter a lexer object so that we lex and expand the tokens just
+ // like any others.
+ Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
+ StrVal.size(), *this);
+
+ EnterSourceFileWithLexer(TL, 0);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective();
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+
+
+/// HandlePragmaOnce - Handle #pragma once. OnceTok is the 'once'.
+///
+void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
+ if (isInPrimaryFile()) {
+ Diag(OnceTok, diag::pp_pragma_once_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ // Mark the file as a once-only file now.
+ HeaderInfo.MarkFileIncludeOnce(getCurrentFileLexer()->getFileEntry());
+}
+
+void Preprocessor::HandlePragmaMark() {
+ assert(CurPPLexer && "No current lexer?");
+ if (CurLexer) CurLexer->ReadToEndOfLine();
+ else CurPTHLexer->DiscardToEndOfLine();
+}
+
+
+/// HandlePragmaPoison - Handle #pragma GCC poison. PoisonTok is the 'poison'.
+///
+void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
+ Token Tok;
+
+ while (1) {
+ // Read the next token to poison. While doing this, pretend that we are
+ // skipping while reading the identifier to poison.
+ // This avoids errors on code like:
+ // #pragma GCC poison X
+ // #pragma GCC poison X
+ if (CurPPLexer) CurPPLexer->LexingRawMode = true;
+ LexUnexpandedToken(Tok);
+ if (CurPPLexer) CurPPLexer->LexingRawMode = false;
+
+ // If we reached the end of line, we're done.
+ if (Tok.is(tok::eom)) return;
+
+ // Can only poison identifiers.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_pp_invalid_poison);
+ return;
+ }
+
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ IdentifierInfo *II = LookUpIdentifierInfo(Tok);
+
+ // Already poisoned.
+ if (II->isPoisoned()) continue;
+
+ // If this is a macro identifier, emit a warning.
+ if (II->hasMacroDefinition())
+ Diag(Tok, diag::pp_poisoning_existing_macro);
+
+ // Finally, poison it!
+ II->setIsPoisoned();
+ }
+}
+
+/// HandlePragmaSystemHeader - Implement #pragma GCC system_header. We know
+/// that the whole directive has been parsed.
+void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
+ if (isInPrimaryFile()) {
+ Diag(SysHeaderTok, diag::pp_pragma_sysheader_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ // Mark the file as a system header.
+ HeaderInfo.MarkFileSystemHeader(TheLexer->getFileEntry());
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks)
+ Callbacks->FileChanged(SysHeaderTok.getLocation(),
+ PPCallbacks::SystemHeaderPragma, SrcMgr::C_System);
+}
+
+/// HandlePragmaDependency - Handle #pragma GCC dependency "foo" blah.
+///
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // If the token kind is EOM, the error has already been diagnosed.
+ if (FilenameTok.is(tok::eom))
+ return;
+
+ // Reserve a buffer to get the spelling.
+ llvm::SmallVector<char, 128> FilenameBuffer;
+ FilenameBuffer.resize(FilenameTok.getLength());
+
+ const char *FilenameStart = &FilenameBuffer[0];
+ unsigned Len = getSpelling(FilenameTok, FilenameStart);
+ const char *FilenameEnd = FilenameStart+Len;
+ bool isAngled = GetIncludeFilenameSpelling(FilenameTok.getLocation(),
+ FilenameStart, FilenameEnd);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (FilenameStart == 0)
+ return;
+
+ // Search include directories for this file.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File = LookupFile(FilenameStart, FilenameEnd,
+ isAngled, 0, CurDir);
+ if (File == 0) {
+ Diag(FilenameTok, diag::err_pp_file_not_found)
+ << std::string(FilenameStart, FilenameEnd);
+ return;
+ }
+
+ const FileEntry *CurFile = getCurrentFileLexer()->getFileEntry();
+
+ // If this file is older than the file it depends on, emit a diagnostic.
+ if (CurFile && CurFile->getModificationTime() < File->getModificationTime()) {
+ // Lex tokens at the end of the message and include them in the message.
+ std::string Message;
+ Lex(DependencyTok);
+ while (DependencyTok.isNot(tok::eom)) {
+ Message += getSpelling(DependencyTok) + " ";
+ Lex(DependencyTok);
+ }
+
+ Message.erase(Message.end()-1);
+ Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
+ }
+}
+
+/// HandlePragmaComment - Handle the microsoft #pragma comment extension. The
+/// syntax is:
+/// #pragma comment(linker, "foo")
+/// The first argument is one of five identifiers: compiler, exestr, lib,
+/// linker, user. "foo" is a string, which is fully macro expanded, and
+/// permits string concatenation, embedded escape characters, etc. See MSDN
+/// for more details.
+void Preprocessor::HandlePragmaComment(Token &Tok) {
+ SourceLocation CommentLoc = Tok.getLocation();
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Read the identifier.
+ Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Verify that this is one of the 5 whitelisted options.
+ // FIXME: warn that 'exestr' is deprecated.
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->isStr("compiler") && !II->isStr("exestr") && !II->isStr("lib") &&
+ !II->isStr("linker") && !II->isStr("user")) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_unknown_kind);
+ return;
+ }
+
+ // Read the optional string if present.
+ Lex(Tok);
+ std::string ArgumentString;
+ if (Tok.is(tok::comma)) {
+ Lex(Tok); // eat the comma.
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ llvm::SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ StrToks.push_back(Tok);
+ Lex(Tok);
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this);
+ assert(!Literal.AnyWide && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ Diag(StrToks[0].getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ ArgumentString = std::string(Literal.GetString(),
+ Literal.GetString()+Literal.GetStringLength());
+ }
+
+ // FIXME: If the kind is "compiler" warn if the string is present (it is
+ // ignored).
+ // FIXME: 'lib' requires a comment string.
+ // FIXME: 'linker' requires a comment string, and has a specific list of
+ // things that are allowable.
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+ Lex(Tok); // eat the r_paren.
+
+ if (Tok.isNot(tok::eom)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (Callbacks)
+ Callbacks->PragmaComment(CommentLoc, II, ArgumentString);
+}
+
+
+
+
+/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
+/// If 'Namespace' is non-null, then it is a token required to exist on the
+/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
+void Preprocessor::AddPragmaHandler(const char *Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *InsertNS = PragmaHandlers;
+
+ // If this is specified to be in a namespace, step down into it.
+ if (Namespace) {
+ IdentifierInfo *NSID = getIdentifierInfo(Namespace);
+
+ // If there is already a pragma handler with the name of this namespace,
+ // we either have an error (directive with the same name as a namespace) or
+ // we already have the namespace to insert into.
+ if (PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID)) {
+ InsertNS = Existing->getIfNamespace();
+ assert(InsertNS != 0 && "Cannot have a pragma namespace and pragma"
+ " handler with the same name!");
+ } else {
+ // Otherwise, this namespace doesn't exist yet, create and insert the
+ // handler for it.
+ InsertNS = new PragmaNamespace(NSID);
+ PragmaHandlers->AddPragma(InsertNS);
+ }
+ }
+
+ // Check to make sure we don't already have a pragma for this identifier.
+ assert(!InsertNS->FindHandler(Handler->getName()) &&
+ "Pragma handler already exists for this identifier!");
+ InsertNS->AddPragma(Handler);
+}
+
+/// RemovePragmaHandler - Remove the specific pragma handler from the
+/// preprocessor. If \arg Namespace is non-null, then it should be the
+/// namespace that \arg Handler was added to. It is an error to remove
+/// a handler that has not been registered.
+void Preprocessor::RemovePragmaHandler(const char *Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *NS = PragmaHandlers;
+
+ // If this is specified to be in a namespace, step down into it.
+ if (Namespace) {
+ IdentifierInfo *NSID = getIdentifierInfo(Namespace);
+ PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID);
+ assert(Existing && "Namespace containing handler does not exist!");
+
+ NS = Existing->getIfNamespace();
+ assert(NS && "Invalid namespace, registered as a regular pragma handler!");
+ }
+
+ NS->RemovePragmaHandler(Handler);
+
+ // If this is a non-default namespace and it is now empty, remove
+ // it.
+ if (NS != PragmaHandlers && NS->IsEmpty())
+ PragmaHandlers->RemovePragmaHandler(NS);
+}
+
+namespace {
+/// PragmaOnceHandler - "#pragma once" marks the file as atomically included.
+struct PragmaOnceHandler : public PragmaHandler {
+ PragmaOnceHandler(const IdentifierInfo *OnceID) : PragmaHandler(OnceID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) {
+ PP.CheckEndOfDirective("pragma once");
+ PP.HandlePragmaOnce(OnceTok);
+ }
+};
+
+/// PragmaMarkHandler - "#pragma mark ..." is ignored by the compiler, and the
+/// rest of the line is not lexed.
+struct PragmaMarkHandler : public PragmaHandler {
+ PragmaMarkHandler(const IdentifierInfo *MarkID) : PragmaHandler(MarkID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &MarkTok) {
+ PP.HandlePragmaMark();
+ }
+};
+
+/// PragmaPoisonHandler - "#pragma poison x" marks x as not usable.
+struct PragmaPoisonHandler : public PragmaHandler {
+ PragmaPoisonHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) {
+ PP.HandlePragmaPoison(PoisonTok);
+ }
+};
+
+/// PragmaSystemHeaderHandler - "#pragma system_header" marks the current file
+/// as a system header, which silences warnings in it.
+struct PragmaSystemHeaderHandler : public PragmaHandler {
+ PragmaSystemHeaderHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &SHToken) {
+ PP.HandlePragmaSystemHeader(SHToken);
+ PP.CheckEndOfDirective("pragma");
+ }
+};
+struct PragmaDependencyHandler : public PragmaHandler {
+ PragmaDependencyHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
+ PP.HandlePragmaDependency(DepToken);
+ }
+};
+
+/// PragmaDiagnosticHandler - e.g. '#pragma GCC diagnostic ignored "-Wformat"'
+struct PragmaDiagnosticHandler : public PragmaHandler {
+ PragmaDiagnosticHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &DiagToken) {
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ diag::Mapping Map;
+ if (II->isStr("warning"))
+ Map = diag::MAP_WARNING;
+ else if (II->isStr("error"))
+ Map = diag::MAP_ERROR;
+ else if (II->isStr("ignored"))
+ Map = diag::MAP_IGNORE;
+ else if (II->isStr("fatal"))
+ Map = diag::MAP_FATAL;
+ else {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+
+ PP.LexUnexpandedToken(Tok);
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ llvm::SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ StrToks.push_back(Tok);
+ PP.LexUnexpandedToken(Tok);
+ }
+
+ if (Tok.isNot(tok::eom)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
+ return;
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), PP);
+ assert(!Literal.AnyWide && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ PP.Diag(StrToks[0].getLocation(), diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+
+ std::string WarningName(Literal.GetString(),
+ Literal.GetString()+Literal.GetStringLength());
+
+ if (WarningName.size() < 3 || WarningName[0] != '-' ||
+ WarningName[1] != 'W') {
+ PP.Diag(StrToks[0].getLocation(),
+ diag::warn_pragma_diagnostic_invalid_option);
+ return;
+ }
+
+ if (PP.getDiagnostics().setDiagnosticGroupMapping(WarningName.c_str()+2,
+ Map))
+ PP.Diag(StrToks[0].getLocation(),
+ diag::warn_pragma_diagnostic_unknown_warning) << WarningName;
+ }
+};
+
+/// PragmaCommentHandler - "#pragma comment ...".
+struct PragmaCommentHandler : public PragmaHandler {
+ PragmaCommentHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) {
+ PP.HandlePragmaComment(CommentTok);
+ }
+};
+
+// Pragma STDC implementations.
+
+enum STDCSetting {
+ STDC_ON, STDC_OFF, STDC_DEFAULT, STDC_INVALID
+};
+
+static STDCSetting LexOnOffSwitch(Preprocessor &PP) {
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::ext_stdc_pragma_syntax);
+ return STDC_INVALID;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ STDCSetting Result;
+ if (II->isStr("ON"))
+ Result = STDC_ON;
+ else if (II->isStr("OFF"))
+ Result = STDC_OFF;
+ else if (II->isStr("DEFAULT"))
+ Result = STDC_DEFAULT;
+ else {
+ PP.Diag(Tok, diag::ext_stdc_pragma_syntax);
+ return STDC_INVALID;
+ }
+
+ // Verify that this is followed by EOM.
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eom))
+ PP.Diag(Tok, diag::ext_stdc_pragma_syntax_eom);
+ return Result;
+}
+
+/// PragmaSTDC_FP_CONTRACTHandler - "#pragma STDC FP_CONTRACT ...".
+struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler {
+ PragmaSTDC_FP_CONTRACTHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
+ // We just ignore the setting of FP_CONTRACT. Since we don't do contractions
+ // at all, our default is OFF and setting it to ON is an optimization hint
+ // we can safely ignore. When we support -ffma or something, we would need
+ // to diagnose that we are ignoring FMA.
+ LexOnOffSwitch(PP);
+ }
+};
+
+/// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...".
+struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
+ PragmaSTDC_FENV_ACCESSHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
+ if (LexOnOffSwitch(PP) == STDC_ON)
+ PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ }
+};
+
+/// PragmaSTDC_CX_LIMITED_RANGEHandler - "#pragma STDC CX_LIMITED_RANGE ...".
+struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
+ PragmaSTDC_CX_LIMITED_RANGEHandler(const IdentifierInfo *ID)
+ : PragmaHandler(ID) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &Tok) {
+ LexOnOffSwitch(PP);
+ }
+};
+
+/// PragmaSTDC_UnknownHandler - "#pragma STDC ...".
+struct PragmaSTDC_UnknownHandler : public PragmaHandler {
+ PragmaSTDC_UnknownHandler() : PragmaHandler(0) {}
+ virtual void HandlePragma(Preprocessor &PP, Token &UnknownTok) {
+ // C99 6.10.6p2, unknown forms are not allowed.
+ PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
+ }
+};
+
+} // end anonymous namespace
+
+
+/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
+/// #pragma GCC poison/system_header/dependency and #pragma once.
+void Preprocessor::RegisterBuiltinPragmas() {
+ AddPragmaHandler(0, new PragmaOnceHandler(getIdentifierInfo("once")));
+ AddPragmaHandler(0, new PragmaMarkHandler(getIdentifierInfo("mark")));
+
+ // #pragma GCC ...
+ AddPragmaHandler("GCC", new PragmaPoisonHandler(getIdentifierInfo("poison")));
+ AddPragmaHandler("GCC", new PragmaSystemHeaderHandler(
+ getIdentifierInfo("system_header")));
+ AddPragmaHandler("GCC", new PragmaDependencyHandler(
+ getIdentifierInfo("dependency")));
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler(
+ getIdentifierInfo("diagnostic")));
+ // #pragma clang ...
+ AddPragmaHandler("clang", new PragmaPoisonHandler(
+ getIdentifierInfo("poison")));
+ AddPragmaHandler("clang", new PragmaSystemHeaderHandler(
+ getIdentifierInfo("system_header")));
+ AddPragmaHandler("clang", new PragmaDependencyHandler(
+ getIdentifierInfo("dependency")));
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler(
+ getIdentifierInfo("diagnostic")));
+
+ AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler(
+ getIdentifierInfo("FP_CONTRACT")));
+ AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler(
+ getIdentifierInfo("FENV_ACCESS")));
+ AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler(
+ getIdentifierInfo("CX_LIMITED_RANGE")));
+ AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
+
+ // MS extensions.
+ if (Features.Microsoft)
+ AddPragmaHandler(0, new PragmaCommentHandler(getIdentifierInfo("comment")));
+}
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
new file mode 100644
index 0000000..0a7d92e
--- /dev/null
+++ b/lib/Lex/Preprocessor.cpp
@@ -0,0 +1,478 @@
+//===--- Preprocessor.cpp - C Language Family Preprocessor Implementation -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// Options to support:
+// -H - Print the name of each header file used.
+// -d[DNI] - Dump various things.
+// -fworking-directory - #line's with preprocessor's working dir.
+// -fpreprocessed
+// -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
+// -W*
+// -w
+//
+// Messages to emit:
+// "Multiple include guards may be useful for:\n"
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Streams.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+
+PreprocessorFactory::~PreprocessorFactory() {}
+
+Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts,
+ TargetInfo &target, SourceManager &SM,
+ HeaderSearch &Headers,
+ IdentifierInfoLookup* IILookup)
+ : Diags(&diags), Features(opts), Target(target),FileMgr(Headers.getFileMgr()),
+ SourceMgr(SM), HeaderInfo(Headers), Identifiers(opts, IILookup),
+ CurPPLexer(0), CurDirLookup(0), Callbacks(0) {
+ ScratchBuf = new ScratchBuffer(SourceMgr);
+ CounterValue = 0; // __COUNTER__ starts at 0.
+
+ // Clear stats.
+ NumDirectives = NumDefined = NumUndefined = NumPragma = 0;
+ NumIf = NumElse = NumEndif = 0;
+ NumEnteredSourceFiles = 0;
+ NumMacroExpanded = NumFnMacroExpanded = NumBuiltinMacroExpanded = 0;
+ NumFastMacroExpanded = NumTokenPaste = NumFastTokenPaste = 0;
+ MaxIncludeStackDepth = 0;
+ NumSkipped = 0;
+
+ // Default to discarding comments.
+ KeepComments = false;
+ KeepMacroComments = false;
+
+ // Macro expansion is enabled.
+ DisableMacroExpansion = false;
+ InMacroArgs = false;
+ NumCachedTokenLexers = 0;
+
+ CachedLexPos = 0;
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
+
+ // Initialize the pragma handlers.
+ PragmaHandlers = new PragmaNamespace(0);
+ RegisterBuiltinPragmas();
+
+ // Initialize builtin macros like __LINE__ and friends.
+ RegisterBuiltinMacros();
+}
+
+Preprocessor::~Preprocessor() {
+ assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
+
+ while (!IncludeMacroStack.empty()) {
+ delete IncludeMacroStack.back().TheLexer;
+ delete IncludeMacroStack.back().TheTokenLexer;
+ IncludeMacroStack.pop_back();
+ }
+
+ // Free any macro definitions.
+ for (llvm::DenseMap<IdentifierInfo*, MacroInfo*>::iterator I =
+ Macros.begin(), E = Macros.end(); I != E; ++I) {
+ // We don't need to free the MacroInfo objects directly. These
+ // will be released when the BumpPtrAllocator 'BP' object gets
+ // destroyed. We still need to run the destructor, however, to free
+ // memory allocated by MacroInfo.
+ I->second->Destroy(BP);
+ I->first->setHasMacroDefinition(false);
+ }
+
+ // Free any cached macro expanders.
+ for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
+ delete TokenLexerCache[i];
+
+ // Release pragma information.
+ delete PragmaHandlers;
+
+ // Delete the scratch buffer info.
+ delete ScratchBuf;
+
+ delete Callbacks;
+}
+
+void Preprocessor::setPTHManager(PTHManager* pm) {
+ PTH.reset(pm);
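+ // Answer 'stat' queries from the PTH file's cached stat information when
+ // possible instead of hitting the filesystem.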
+ FileMgr.setStatCache(PTH->createStatCache());
+}
+
+void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
+ llvm::cerr << tok::getTokenName(Tok.getKind()) << " '"
+ << getSpelling(Tok) << "'";
+
+ if (!DumpFlags) return;
+
+ llvm::cerr << "\t";
+ if (Tok.isAtStartOfLine())
+ llvm::cerr << " [StartOfLine]";
+ if (Tok.hasLeadingSpace())
+ llvm::cerr << " [LeadingSpace]";
+ if (Tok.isExpandDisabled())
+ llvm::cerr << " [ExpandDisabled]";
+ if (Tok.needsCleaning()) {
+ const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
+ llvm::cerr << " [UnClean='" << std::string(Start, Start+Tok.getLength())
+ << "']";
+ }
+
+ llvm::cerr << "\tLoc=<";
+ DumpLocation(Tok.getLocation());
+ llvm::cerr << ">";
+}
+
+void Preprocessor::DumpLocation(SourceLocation Loc) const {
+ Loc.dump(SourceMgr);
+}
+
+void Preprocessor::DumpMacro(const MacroInfo &MI) const {
+ llvm::cerr << "MACRO: ";
+ for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
+ DumpToken(MI.getReplacementToken(i));
+ llvm::cerr << " ";
+ }
+ llvm::cerr << "\n";
+}
+
+void Preprocessor::PrintStats() {
+ llvm::cerr << "\n*** Preprocessor Stats:\n";
+ llvm::cerr << NumDirectives << " directives found:\n";
+ llvm::cerr << " " << NumDefined << " #define.\n";
+ llvm::cerr << " " << NumUndefined << " #undef.\n";
+ llvm::cerr << " #include/#include_next/#import:\n";
+ llvm::cerr << " " << NumEnteredSourceFiles << " source files entered.\n";
+ llvm::cerr << " " << MaxIncludeStackDepth << " max include stack depth\n";
+ llvm::cerr << " " << NumIf << " #if/#ifndef/#ifdef.\n";
+ llvm::cerr << " " << NumElse << " #else/#elif.\n";
+ llvm::cerr << " " << NumEndif << " #endif.\n";
+ llvm::cerr << " " << NumPragma << " #pragma.\n";
+ llvm::cerr << NumSkipped << " #if/#ifndef#ifdef regions skipped\n";
+
+ llvm::cerr << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
+ << NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
+ << NumFastMacroExpanded << " on the fast path.\n";
+ llvm::cerr << (NumFastTokenPaste+NumTokenPaste)
+ << " token paste (##) operations performed, "
+ << NumFastTokenPaste << " on the fast path.\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Token Spelling
+//===----------------------------------------------------------------------===//
+
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the characters used to represent the token in the source file
+/// after trigraph expansion and escaped-newline folding. In particular, this
+/// wants to get the true, uncanonicalized, spelling of things like digraphs,
+/// UCNs, etc.
+std::string Preprocessor::getSpelling(const Token &Tok) const {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ // If this token contains nothing interesting, return it directly.
+ const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation());
+ if (!Tok.needsCleaning())
+ return std::string(TokStart, TokStart+Tok.getLength());
+
+ std::string Result;
+ Result.reserve(Tok.getLength());
+
+ // Otherwise, hard case, relex the characters into the string.
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features));
+ Ptr += CharSize;
+ }
+ assert(Result.size() != unsigned(Tok.getLength()) &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+ return Result;
+}
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// preallocated buffer, instead of as an std::string. The caller is required
+/// to allocate enough space for the token, which is guaranteed to be at least
+/// Tok.getLength() bytes long. The actual length of the token is returned.
+///
+/// Note that this method may do two possible things: it may either fill in
+/// the buffer specified with characters, or it may *change the input pointer*
+/// to point to a constant buffer with the data already in it (avoiding a
+/// copy). The caller is not allowed to modify the returned buffer pointer
+/// if an internal buffer is returned.
+unsigned Preprocessor::getSpelling(const Token &Tok,
+ const char *&Buffer) const {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ // If this token is an identifier, just return the string from the identifier
+ // table, which is very quick.
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ Buffer = II->getName();
+ return II->getLength();
+ }
+
+ // Otherwise, compute the start of the token in the input lexer buffer.
+ const char *TokStart = 0;
+
+ if (Tok.isLiteral())
+ TokStart = Tok.getLiteralData();
+
+ if (TokStart == 0)
+ TokStart = SourceMgr.getCharacterData(Tok.getLocation());
+
+ // If this token contains nothing interesting, return it directly.
+ if (!Tok.needsCleaning()) {
+ Buffer = TokStart;
+ return Tok.getLength();
+ }
+
+ // Otherwise, hard case, relex the characters into the string.
+ char *OutBuf = const_cast<char*>(Buffer);
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, Features);
+ Ptr += CharSize;
+ }
+ assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+
+ return OutBuf-Buffer;
+}
+
+/// CreateString - Plop the specified string into a scratch buffer and set the
+/// token's location and length to refer to it.  If specified, InstantiationLoc
+/// provides the instantiation (use) location for the new token.
+void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
+ SourceLocation InstantiationLoc) {
+ Tok.setLength(Len);
+
+ const char *DestPtr;
+ SourceLocation Loc = ScratchBuf->getToken(Buf, Len, DestPtr);
+
+ if (InstantiationLoc.isValid())
+ Loc = SourceMgr.createInstantiationLoc(Loc, InstantiationLoc,
+ InstantiationLoc, Len);
+ Tok.setLocation(Loc);
+
+ // If this is a literal token, set the pointer data.
+ if (Tok.isLiteral())
+ Tok.setLiteralData(DestPtr);
+}
+
+
+/// AdvanceToTokenCharacter - Given a location that specifies the start of a
+/// token, return a new location that specifies a character within the token.
+SourceLocation Preprocessor::AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned CharNo) {
+ // Figure out how many physical characters away the specified instantiation
+ // character is. This needs to take into consideration newlines and
+ // trigraphs.
+ const char *TokPtr = SourceMgr.getCharacterData(TokStart);
+
+ // If they request the first char of the token, we're trivially done.
+ if (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr))
+ return TokStart;
+
+ unsigned PhysOffset = 0;
+
+ // The usual case is that tokens don't contain anything interesting. Skip
+ // over the uninteresting characters. If a token only consists of simple
+ // chars, this method is extremely fast.
+ while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
+ if (CharNo == 0)
+ return TokStart.getFileLocWithOffset(PhysOffset);
+ ++TokPtr, --CharNo, ++PhysOffset;
+ }
+
+ // If we have a character that may be a trigraph or escaped newline, use a
+ // lexer to parse it correctly.
+ for (; CharNo; --CharNo) {
+ unsigned Size;
+ Lexer::getCharAndSizeNoWarn(TokPtr, Size, Features);
+ TokPtr += Size;
+ PhysOffset += Size;
+ }
+
+ // Final detail: if we end up on an escaped newline, we want to return the
+ // location of the actual byte of the token. For example foo\<newline>bar
+ // advanced by 3 should return the location of b, not of \\. One compounding
+ // detail of this is that the escape may be made by a trigraph.
+ if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
+ PhysOffset = Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
+
+ return TokStart.getFileLocWithOffset(PhysOffset);
+}
+
+/// \brief Computes the source location just past the end of the
+/// token at this source location.
+///
+/// This routine can be used to produce a source location that
+/// points just past the end of the token referenced by \p Loc, and
+/// is generally used when a diagnostic needs to point just after a
+/// token where it expected something different from what it received.  If
+/// the returned source location would not be meaningful (e.g., if
+/// it points into a macro), this routine returns an invalid
+/// source location.
+SourceLocation Preprocessor::getLocForEndOfToken(SourceLocation Loc) {
+ if (Loc.isInvalid() || !Loc.isFileID())
+ return SourceLocation();
+
+ unsigned Len = Lexer::MeasureTokenLength(Loc, getSourceManager(), Features);
+ return AdvanceToTokenCharacter(Loc, Len);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Initialization Methods
+//===----------------------------------------------------------------------===//
+
+
+/// EnterMainSourceFile - Enter the specified FileID as the main source file,
+/// which implicitly adds the builtin defines etc.
+void Preprocessor::EnterMainSourceFile() {
+ // We do not allow the preprocessor to reenter the main file. Doing so will
+ // cause FileID's to accumulate information from both runs (e.g. #line
+ // information) and predefined macros aren't guaranteed to be set properly.
+ assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
+ FileID MainFileID = SourceMgr.getMainFileID();
+
+ // Enter the main file source buffer.
+ EnterSourceFile(MainFileID, 0);
+
+ // Tell the header info that the main file was entered. If the file is later
+ // #imported, it won't be re-entered.
+ if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
+ HeaderInfo.IncrementIncludeCount(FE);
+
+ std::vector<char> PrologFile;
+ PrologFile.reserve(4080);
+
+ // FIXME: Don't make a copy.
+ PrologFile.insert(PrologFile.end(), Predefines.begin(), Predefines.end());
+
+ // Memory buffer must end with a null byte!
+ PrologFile.push_back(0);
+
+ // Now that we have emitted the predefined macros, #includes, etc into
+ // PrologFile, preprocess it to populate the initial preprocessor state.
+ llvm::MemoryBuffer *SB =
+ llvm::MemoryBuffer::getMemBufferCopy(&PrologFile.front(),&PrologFile.back(),
+ "<built-in>");
+ assert(SB && "Cannot fail to create predefined source buffer");
+ FileID FID = SourceMgr.createFileIDForMemBuffer(SB);
+ assert(!FID.isInvalid() && "Could not create FileID for predefines?");
+
+ // Start parsing the predefines.
+ EnterSourceFile(FID, 0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Lexer Event Handling.
+//===----------------------------------------------------------------------===//
+
+/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
+/// identifier information for the token and install it into the token.
+IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
+ const char *BufPtr) {
+ assert(Identifier.is(tok::identifier) && "Not an identifier!");
+ assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
+
+ // Look up this token, see if it is a macro, or if it is a language keyword.
+ IdentifierInfo *II;
+ if (BufPtr && !Identifier.needsCleaning()) {
+ // No cleaning needed, just use the characters from the lexed buffer.
+ II = getIdentifierInfo(BufPtr, BufPtr+Identifier.getLength());
+ } else {
+ // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
+ llvm::SmallVector<char, 64> IdentifierBuffer;
+ IdentifierBuffer.resize(Identifier.getLength());
+ const char *TmpBuf = &IdentifierBuffer[0];
+ unsigned Size = getSpelling(Identifier, TmpBuf);
+ II = getIdentifierInfo(TmpBuf, TmpBuf+Size);
+ }
+ Identifier.setIdentifierInfo(II);
+ return II;
+}
+
+
+/// HandleIdentifier - This callback is invoked when the lexer reads an
+/// identifier. This callback looks up the identifier in the map and/or
+/// potentially macro expands it or turns it into a named token (like 'for').
+///
+/// Note that callers of this method are guarded by checking the
+/// IdentifierInfo's 'isHandleIdentifierCase' bit. If this method changes, the
+/// IdentifierInfo methods that compute these properties will need to change to
+/// match.
+void Preprocessor::HandleIdentifier(Token &Identifier) {
+ assert(Identifier.getIdentifierInfo() &&
+ "Can't handle identifiers without identifier info!");
+
+ IdentifierInfo &II = *Identifier.getIdentifierInfo();
+
+ // If this identifier was poisoned, and if it was not produced from a macro
+ // expansion, emit an error.
+ if (II.isPoisoned() && CurPPLexer) {
+ if (&II != Ident__VA_ARGS__) // We warn about __VA_ARGS__ with poisoning.
+ Diag(Identifier, diag::err_pp_used_poisoned_id);
+ else
+ Diag(Identifier, diag::ext_pp_bad_vaargs_use);
+ }
+
+ // If this is a macro to be expanded, do it.
+ if (MacroInfo *MI = getMacroInfo(&II)) {
+ if (!DisableMacroExpansion && !Identifier.isExpandDisabled()) {
+ if (MI->isEnabled()) {
+ if (!HandleMacroExpandedIdentifier(Identifier, MI))
+ return;
+ } else {
+ // C99 6.10.3.4p2 says that a disabled macro may never again be
+ // expanded, even if it's in a context where it could be expanded in the
+ // future.
+ Identifier.setFlag(Token::DisableExpand);
+ }
+ }
+ }
+
+ // C++ 2.11p2: If this is an alternative representation of a C++ operator,
+ // then we act as if it is the actual operator and not the textual
+ // representation of it.
+ if (II.isCPlusPlusOperatorKeyword())
+ Identifier.setIdentifierInfo(0);
+
+ // If this is an extension token, diagnose its use.
+ // We avoid diagnosing tokens that originate from macro definitions.
+ // FIXME: This warning is disabled in cases where it shouldn't be,
+ // like "#define TY typeof", "TY(1) x".
+ if (II.isExtensionToken() && !DisableMacroExpansion)
+ Diag(Identifier, diag::ext_token_used);
+}
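[Annotation, not part of the patch] A minimal sketch of how a client might combine the spelling and end-of-token helpers above; `PP` is assumed to be a live Preprocessor and `Tok` a token it just lexed:

    // Recover the cleaned characters of the token and a location just past it.
    std::string Spelling = PP.getSpelling(Tok);
    SourceLocation End = PP.getLocForEndOfToken(Tok.getLocation());
    if (End.isValid()) {
      // End is handy for diagnostics of the form "expected X after this token".
      llvm::cerr << "lexed '" << Spelling << "' ending at ";
      PP.DumpLocation(End);   // DumpLocation is defined earlier in this file
      llvm::cerr << "\n";
    }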
diff --git a/lib/Lex/PreprocessorLexer.cpp b/lib/Lex/PreprocessorLexer.cpp
new file mode 100644
index 0000000..f9dfad9
--- /dev/null
+++ b/lib/Lex/PreprocessorLexer.cpp
@@ -0,0 +1,45 @@
+//===--- PreprocessorLexer.cpp - C Language Family Lexer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PreprocessorLexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
+/// (potentially) macro expand the filename.
+void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
+ assert(ParsingPreprocessorDirective &&
+ ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+
+ // We are now parsing a filename!
+ ParsingFilename = true;
+
+ // Lex the filename.
+ IndirectLex(FilenameTok);
+
+ // We should have obtained the filename now.
+ ParsingFilename = false;
+
+ // No filename?
+ if (FilenameTok.is(tok::eom))
+ PP->Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+}
+
+/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
+/// getFileID(), this only works for lexers with attached preprocessors.
+const FileEntry *PreprocessorLexer::getFileEntry() const {
+ return PP->getSourceManager().getFileEntryForID(getFileID());
+}
diff --git a/lib/Lex/ScratchBuffer.cpp b/lib/Lex/ScratchBuffer.cpp
new file mode 100644
index 0000000..28f3d7f
--- /dev/null
+++ b/lib/Lex/ScratchBuffer.cpp
@@ -0,0 +1,73 @@
+//===--- ScratchBuffer.cpp - Scratch space for forming tokens -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScratchBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+using namespace clang;
+
+// ScratchBufSize - The size of each chunk of scratch memory. Slightly less
+// than a page, almost certainly enough for anything. :)
+static const unsigned ScratchBufSize = 4060;
+
+ScratchBuffer::ScratchBuffer(SourceManager &SM) : SourceMgr(SM), CurBuffer(0) {
+ // Set BytesUsed so that the first call to getToken will require an alloc.
+ BytesUsed = ScratchBufSize;
+}
+
+/// getToken - Splat the specified text into a temporary MemoryBuffer and
+/// return a SourceLocation that refers to the token.  The returned location
+/// points at the physical location of the copied characters in the scratch
+/// buffer, and DestPtr is set to point at the start of that copy.
+SourceLocation ScratchBuffer::getToken(const char *Buf, unsigned Len,
+ const char *&DestPtr) {
+ if (BytesUsed+Len+2 > ScratchBufSize)
+ AllocScratchBuffer(Len+2);
+
+ // Prefix the token with a \n, so that it looks like it is the first thing on
+ // its own virtual line in caret diagnostics.
+ CurBuffer[BytesUsed++] = '\n';
+
+ // Return a pointer to the character data.
+ DestPtr = CurBuffer+BytesUsed;
+
+ // Copy the token data into the buffer.
+ memcpy(CurBuffer+BytesUsed, Buf, Len);
+
+ // Remember that we used these bytes.
+ BytesUsed += Len+1;
+
+ // Add a NUL terminator to the token. This keeps the tokens separated, in
+ // case they get relexed, and puts them on their own virtual lines in case a
+ // diagnostic points to one.
+ CurBuffer[BytesUsed-1] = '\0';
+
+ return BufferStartLoc.getFileLocWithOffset(BytesUsed-Len-1);
+}
+
+void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) {
+ // Only pay attention to the requested length if it is larger than our default
+ // page size. If it is, we allocate an entire chunk for it. This is to
+ // support gigantic tokens, which almost certainly won't happen. :)
+ if (RequestLen < ScratchBufSize)
+ RequestLen = ScratchBufSize;
+
+ llvm::MemoryBuffer *Buf =
+ llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
+ FileID FID = SourceMgr.createFileIDForMemBuffer(Buf);
+ BufferStartLoc = SourceMgr.getLocForStartOfFile(FID);
+ CurBuffer = const_cast<char*>(Buf->getBufferStart());
+ BytesUsed = 1;
+  CurBuffer[0] = '\0';  // Start out with a \0 for cleanliness.
+}
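[Annotation, not part of the patch] Worked example of the bookkeeping in getToken(), assuming the scratch chunk is already allocated and BytesUsed == 1: a call to getToken("foo", 3, DestPtr) lays the bytes out as

    index:  0     1     2    3    4    5
    byte:   '\0'  '\n'  'f'  'o'  'o'  '\0'

The newline goes at index 1 (BytesUsed becomes 2), DestPtr is set to CurBuffer+2, the three token bytes are copied there, BytesUsed advances to 2 + 3 + 1 = 6, and the trailing NUL lands at index 5. The returned location is BufferStartLoc offset by BytesUsed - Len - 1 = 2, i.e. the first character of the copied token.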
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
new file mode 100644
index 0000000..ab989ca
--- /dev/null
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -0,0 +1,219 @@
+//===--- TokenConcatenation.cpp - Token Concatenation Avoidance -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenConcatenation class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+
+/// StartsWithL - Return true if the spelling of this token starts with 'L'.
+bool TokenConcatenation::StartsWithL(const Token &Tok) const {
+ if (!Tok.needsCleaning()) {
+ SourceManager &SM = PP.getSourceManager();
+ return *SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation())) == 'L';
+ }
+
+ if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ PP.getSpelling(Tok, TokPtr);
+ return TokPtr[0] == 'L';
+ }
+
+ return PP.getSpelling(Tok)[0] == 'L';
+}
+
+/// IsIdentifierL - Return true if the spelling of this token is literally
+/// 'L'.
+bool TokenConcatenation::IsIdentifierL(const Token &Tok) const {
+ if (!Tok.needsCleaning()) {
+ if (Tok.getLength() != 1)
+ return false;
+ SourceManager &SM = PP.getSourceManager();
+ return *SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation())) == 'L';
+ }
+
+ if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ if (PP.getSpelling(Tok, TokPtr) != 1)
+ return false;
+ return TokPtr[0] == 'L';
+ }
+
+ return PP.getSpelling(Tok) == "L";
+}
+
+TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
+ memset(TokenInfo, 0, sizeof(TokenInfo));
+
+ // These tokens have custom code in AvoidConcat.
+ TokenInfo[tok::identifier ] |= aci_custom;
+ TokenInfo[tok::numeric_constant] |= aci_custom_firstchar;
+ TokenInfo[tok::period ] |= aci_custom_firstchar;
+ TokenInfo[tok::amp ] |= aci_custom_firstchar;
+ TokenInfo[tok::plus ] |= aci_custom_firstchar;
+ TokenInfo[tok::minus ] |= aci_custom_firstchar;
+ TokenInfo[tok::slash ] |= aci_custom_firstchar;
+ TokenInfo[tok::less ] |= aci_custom_firstchar;
+ TokenInfo[tok::greater ] |= aci_custom_firstchar;
+ TokenInfo[tok::pipe ] |= aci_custom_firstchar;
+ TokenInfo[tok::percent ] |= aci_custom_firstchar;
+ TokenInfo[tok::colon ] |= aci_custom_firstchar;
+ TokenInfo[tok::hash ] |= aci_custom_firstchar;
+ TokenInfo[tok::arrow ] |= aci_custom_firstchar;
+
+ // These tokens change behavior if followed by an '='.
+ TokenInfo[tok::amp ] |= aci_avoid_equal; // &=
+ TokenInfo[tok::plus ] |= aci_avoid_equal; // +=
+ TokenInfo[tok::minus ] |= aci_avoid_equal; // -=
+ TokenInfo[tok::slash ] |= aci_avoid_equal; // /=
+ TokenInfo[tok::less ] |= aci_avoid_equal; // <=
+ TokenInfo[tok::greater ] |= aci_avoid_equal; // >=
+ TokenInfo[tok::pipe ] |= aci_avoid_equal; // |=
+ TokenInfo[tok::percent ] |= aci_avoid_equal; // %=
+ TokenInfo[tok::star ] |= aci_avoid_equal; // *=
+ TokenInfo[tok::exclaim ] |= aci_avoid_equal; // !=
+ TokenInfo[tok::lessless ] |= aci_avoid_equal; // <<=
+  TokenInfo[tok::greatergreater] |= aci_avoid_equal; // >>=
+ TokenInfo[tok::caret ] |= aci_avoid_equal; // ^=
+ TokenInfo[tok::equal ] |= aci_avoid_equal; // ==
+}
+
+/// GetFirstChar - Get the first character of the token \arg Tok,
+/// avoiding calls to getSpelling where possible.
+static char GetFirstChar(Preprocessor &PP, const Token &Tok) {
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Avoid spelling identifiers, the most common form of token.
+ return II->getName()[0];
+ } else if (!Tok.needsCleaning()) {
+ if (Tok.isLiteral() && Tok.getLiteralData()) {
+ return *Tok.getLiteralData();
+ } else {
+ SourceManager &SM = PP.getSourceManager();
+ return *SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation()));
+ }
+ } else if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ PP.getSpelling(Tok, TokPtr);
+ return TokPtr[0];
+ } else {
+ return PP.getSpelling(Tok)[0];
+ }
+}
+
+/// AvoidConcat - If printing PrevTok immediately followed by Tok would cause
+/// the two individual tokens to be lexed as a single token, return true
+/// (which causes a space to be printed between them). This allows the output
+/// of -E mode to be lexed to the same token stream as lexing the input
+/// directly would.
+///
+/// This code must conservatively return true if it doesn't want to be 100%
+/// accurate. This will cause the output to include extra space characters,
+/// but the resulting output won't have incorrect concatenations going on.
+/// Examples include "..", which we print with a space between, because we
+/// don't want to track enough to tell "x.." from "...".
+bool TokenConcatenation::AvoidConcat(const Token &PrevTok,
+ const Token &Tok) const {
+ // First, check to see if the tokens were directly adjacent in the original
+ // source. If they were, it must be okay to stick them together: if there
+ // were an issue, the tokens would have been lexed differently.
+ if (PrevTok.getLocation().isFileID() && Tok.getLocation().isFileID() &&
+ PrevTok.getLocation().getFileLocWithOffset(PrevTok.getLength()) ==
+ Tok.getLocation())
+ return false;
+
+ tok::TokenKind PrevKind = PrevTok.getKind();
+ if (PrevTok.getIdentifierInfo()) // Language keyword or named operator.
+ PrevKind = tok::identifier;
+
+ // Look up information on when we should avoid concatenation with prevtok.
+ unsigned ConcatInfo = TokenInfo[PrevKind];
+
+ // If prevtok never causes a problem for anything after it, return quickly.
+ if (ConcatInfo == 0) return false;
+
+ if (ConcatInfo & aci_avoid_equal) {
+ // If the next token is '=' or '==', avoid concatenation.
+ if (Tok.is(tok::equal) || Tok.is(tok::equalequal))
+ return true;
+ ConcatInfo &= ~aci_avoid_equal;
+ }
+
+ if (ConcatInfo == 0) return false;
+
+ // Basic algorithm: we look at the first character of the second token, and
+ // determine whether it, if appended to the first token, would form (or
+ // would contribute) to a larger token if concatenated.
+ char FirstChar = 0;
+ if (ConcatInfo & aci_custom) {
+ // If the token does not need to know the first character, don't get it.
+ } else {
+ FirstChar = GetFirstChar(PP, Tok);
+ }
+
+ switch (PrevKind) {
+ default: assert(0 && "InitAvoidConcatTokenInfo built wrong");
+ case tok::identifier: // id+id or id+number or id+L"foo".
+ // id+'.'... will not append.
+ if (Tok.is(tok::numeric_constant))
+ return GetFirstChar(PP, Tok) != '.';
+
+ if (Tok.getIdentifierInfo() || Tok.is(tok::wide_string_literal) /* ||
+ Tok.is(tok::wide_char_literal)*/)
+ return true;
+
+ // If this isn't identifier + string, we're done.
+ if (Tok.isNot(tok::char_constant) && Tok.isNot(tok::string_literal))
+ return false;
+
+ // FIXME: need a wide_char_constant!
+
+ // If the string was a wide string L"foo" or wide char L'f', it would
+ // concat with the previous identifier into fooL"bar". Avoid this.
+ if (StartsWithL(Tok))
+ return true;
+
+ // Otherwise, this is a narrow character or string. If the *identifier*
+ // is a literal 'L', avoid pasting L "foo" -> L"foo".
+ return IsIdentifierL(PrevTok);
+ case tok::numeric_constant:
+ return isalnum(FirstChar) || Tok.is(tok::numeric_constant) ||
+ FirstChar == '+' || FirstChar == '-' || FirstChar == '.';
+ case tok::period: // ..., .*, .1234
+ return FirstChar == '.' || isdigit(FirstChar) || FirstChar == '*';
+ case tok::amp: // &&
+ return FirstChar == '&';
+ case tok::plus: // ++
+ return FirstChar == '+';
+ case tok::minus: // --, ->, ->*
+ return FirstChar == '-' || FirstChar == '>';
+  case tok::slash:          // /*, //
+ return FirstChar == '*' || FirstChar == '/';
+ case tok::less: // <<, <<=, <:, <%
+ return FirstChar == '<' || FirstChar == ':' || FirstChar == '%';
+ case tok::greater: // >>, >>=
+ return FirstChar == '>';
+ case tok::pipe: // ||
+ return FirstChar == '|';
+ case tok::percent: // %>, %:
+ return FirstChar == '>' || FirstChar == ':';
+ case tok::colon: // ::, :>
+    return FirstChar == ':' || FirstChar == '>';
+ case tok::hash: // ##, #@, %:%:
+ return FirstChar == '#' || FirstChar == '@' || FirstChar == '%';
+ case tok::arrow: // ->*
+ return FirstChar == '*';
+ }
+}
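[Annotation, not part of the patch] A minimal sketch of the intended use of AvoidConcat() in a -E style token printer; `PP` is assumed to be a Preprocessor that has already entered its main source file:

    TokenConcatenation ConcatInfo(PP);
    Token Tok, PrevTok;
    bool First = true;
    PP.Lex(Tok);
    while (Tok.isNot(tok::eof)) {
      // Emit a space only where gluing the spellings together would re-lex as a
      // different token stream, e.g. between '+' and '+' (which would form '++').
      if (!First && ConcatInfo.AvoidConcat(PrevTok, Tok))
        llvm::cerr << " ";
      llvm::cerr << PP.getSpelling(Tok);
      PrevTok = Tok;
      First = false;
      PP.Lex(Tok);
    }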
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
new file mode 100644
index 0000000..f9f9386
--- /dev/null
+++ b/lib/Lex/TokenLexer.cpp
@@ -0,0 +1,542 @@
+//===--- TokenLexer.cpp - Lex from a token stream -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenLexer.h"
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+
+/// Create a TokenLexer for the specified macro with the specified actual
+/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
+void TokenLexer::Init(Token &Tok, SourceLocation ILEnd, MacroArgs *Actuals) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = PP.getMacroInfo(Tok.getIdentifierInfo());
+ ActualArgs = Actuals;
+ CurToken = 0;
+
+ InstantiateLocStart = Tok.getLocation();
+ InstantiateLocEnd = ILEnd;
+ AtStartOfLine = Tok.isAtStartOfLine();
+ HasLeadingSpace = Tok.hasLeadingSpace();
+ Tokens = &*Macro->tokens_begin();
+ OwnsTokens = false;
+ DisableMacroExpansion = false;
+ NumTokens = Macro->tokens_end()-Macro->tokens_begin();
+
+ // If this is a function-like macro, expand the arguments and change
+ // Tokens to point to the expanded tokens.
+ if (Macro->isFunctionLike() && Macro->getNumArgs())
+ ExpandFunctionArguments();
+
+ // Mark the macro as currently disabled, so that it is not recursively
+ // expanded. The macro must be disabled only after argument pre-expansion of
+ // function-like macro arguments occurs.
+ Macro->DisableMacro();
+}
+
+
+
+/// Create a TokenLexer for the specified token stream.  This takes ownership
+/// of the token vector only if ownsTokens is true.
+void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
+ bool disableMacroExpansion, bool ownsTokens) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = 0;
+ ActualArgs = 0;
+ Tokens = TokArray;
+ OwnsTokens = ownsTokens;
+ DisableMacroExpansion = disableMacroExpansion;
+ NumTokens = NumToks;
+ CurToken = 0;
+ InstantiateLocStart = InstantiateLocEnd = SourceLocation();
+ AtStartOfLine = false;
+ HasLeadingSpace = false;
+
+ // Set HasLeadingSpace/AtStartOfLine so that the first token will be
+ // returned unmodified.
+ if (NumToks != 0) {
+ AtStartOfLine = TokArray[0].isAtStartOfLine();
+ HasLeadingSpace = TokArray[0].hasLeadingSpace();
+ }
+}
+
+
+void TokenLexer::destroy() {
+ // If this was a function-like macro that actually uses its arguments, delete
+ // the expanded tokens.
+ if (OwnsTokens) {
+ delete [] Tokens;
+ Tokens = 0;
+ OwnsTokens = false;
+ }
+
+ // TokenLexer owns its formal arguments.
+ if (ActualArgs) ActualArgs->destroy();
+}
+
+/// Expand the arguments of a function-like macro so that we can quickly
+/// return preexpanded tokens from Tokens.
+void TokenLexer::ExpandFunctionArguments() {
+ llvm::SmallVector<Token, 128> ResultToks;
+
+ // Loop through 'Tokens', expanding them into ResultToks. Keep
+ // track of whether we change anything. If not, no need to keep them. If so,
+ // we install the newly expanded sequence as the new 'Tokens' list.
+ bool MadeChange = false;
+
+ // NextTokGetsSpace - When this is true, the next token appended to the
+ // output list will get a leading space, regardless of whether it had one to
+ // begin with or not. This is used for placemarker support.
+ bool NextTokGetsSpace = false;
+
+ for (unsigned i = 0, e = NumTokens; i != e; ++i) {
+ // If we found the stringify operator, get the argument stringified. The
+    // preprocessor already verified that the following token is a macro
+    // argument name when the #define was parsed.
+ const Token &CurTok = Tokens[i];
+ if (CurTok.is(tok::hash) || CurTok.is(tok::hashat)) {
+ int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
+ assert(ArgNo != -1 && "Token following # is not an argument?");
+
+ Token Res;
+ if (CurTok.is(tok::hash)) // Stringify
+ Res = ActualArgs->getStringifiedArgument(ArgNo, PP);
+ else {
+ // 'charify': don't bother caching these.
+ Res = MacroArgs::StringifyArgument(ActualArgs->getUnexpArgument(ArgNo),
+ PP, true);
+ }
+
+ // The stringified/charified string leading space flag gets set to match
+ // the #/#@ operator.
+ if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
+ Res.setFlag(Token::LeadingSpace);
+
+ ResultToks.push_back(Res);
+ MadeChange = true;
+ ++i; // Skip arg name.
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // Otherwise, if this is not an argument token, just add the token to the
+ // output buffer.
+ IdentifierInfo *II = CurTok.getIdentifierInfo();
+ int ArgNo = II ? Macro->getArgumentNum(II) : -1;
+ if (ArgNo == -1) {
+ // This isn't an argument, just add it.
+ ResultToks.push_back(CurTok);
+
+ if (NextTokGetsSpace) {
+ ResultToks.back().setFlag(Token::LeadingSpace);
+ NextTokGetsSpace = false;
+ }
+ continue;
+ }
+
+    // This token is a macro argument that is about to be expanded, so the
+    // result will differ from the input.
+ MadeChange = true;
+
+ // Otherwise, this is a use of the argument. Find out if there is a paste
+ // (##) operator before or after the argument.
+ bool PasteBefore =
+ !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
+ bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
+
+ // If it is not the LHS/RHS of a ## operator, we must pre-expand the
+ // argument and substitute the expanded tokens into the result. This is
+ // C99 6.10.3.1p1.
+ if (!PasteBefore && !PasteAfter) {
+ const Token *ResultArgToks;
+
+ // Only preexpand the argument if it could possibly need it. This
+ // avoids some work in common cases.
+ const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
+ if (ActualArgs->ArgNeedsPreexpansion(ArgTok, PP))
+ ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, PP)[0];
+ else
+ ResultArgToks = ArgTok; // Use non-preexpanded tokens.
+
+ // If the arg token expanded into anything, append it.
+ if (ResultArgToks->isNot(tok::eof)) {
+ unsigned FirstResult = ResultToks.size();
+ unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
+ ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
+
+ // If any tokens were substituted from the argument, the whitespace
+ // before the first token should match the whitespace of the arg
+ // identifier.
+ ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
+ CurTok.hasLeadingSpace() ||
+ NextTokGetsSpace);
+ NextTokGetsSpace = false;
+ } else {
+ // If this is an empty argument, and if there was whitespace before the
+ // formal token, make sure the next token gets whitespace before it.
+ NextTokGetsSpace = CurTok.hasLeadingSpace();
+ }
+ continue;
+ }
+
+ // Okay, we have a token that is either the LHS or RHS of a paste (##)
+ // argument. It gets substituted as its non-pre-expanded tokens.
+ const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
+ unsigned NumToks = MacroArgs::getArgLength(ArgToks);
+ if (NumToks) { // Not an empty argument?
+      // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
+      // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error when
+      // the expander tries to paste ',' with the first token of the __VA_ARGS__
+ // expansion.
+ if (PasteBefore && ResultToks.size() >= 2 &&
+ ResultToks[ResultToks.size()-2].is(tok::comma) &&
+ (unsigned)ArgNo == Macro->getNumArgs()-1 &&
+ Macro->isVariadic()) {
+ // Remove the paste operator, report use of the extension.
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+ ResultToks.pop_back();
+ }
+
+ ResultToks.append(ArgToks, ArgToks+NumToks);
+
+ // If this token (the macro argument) was supposed to get leading
+ // whitespace, transfer this information onto the first token of the
+ // expansion.
+ //
+ // Do not do this if the paste operator occurs before the macro argument,
+ // as in "A ## MACROARG". In valid code, the first token will get
+ // smooshed onto the preceding one anyway (forming AMACROARG). In
+ // assembler-with-cpp mode, invalid pastes are allowed through: in this
+ // case, we do not want the extra whitespace to be added. For example,
+ // we want ". ## foo" -> ".foo" not ". foo".
+ if ((CurTok.hasLeadingSpace() || NextTokGetsSpace) &&
+ !PasteBefore)
+ ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
+
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // If an empty argument is on the LHS or RHS of a paste, the standard (C99
+ // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur. We
+ // implement this by eating ## operators when a LHS or RHS expands to
+ // empty.
+ NextTokGetsSpace |= CurTok.hasLeadingSpace();
+ if (PasteAfter) {
+ // Discard the argument token and skip (don't copy to the expansion
+ // buffer) the paste operator after it.
+ NextTokGetsSpace |= Tokens[i+1].hasLeadingSpace();
+ ++i;
+ continue;
+ }
+
+ // If this is on the RHS of a paste operator, we've already copied the
+ // paste operator to the ResultToks list. Remove it.
+ assert(PasteBefore && ResultToks.back().is(tok::hashhash));
+ NextTokGetsSpace |= ResultToks.back().hasLeadingSpace();
+ ResultToks.pop_back();
+
+ // If this is the __VA_ARGS__ token, and if the argument wasn't provided,
+ // and if the macro had at least one real argument, and if the token before
+ // the ## was a comma, remove the comma.
+ if ((unsigned)ArgNo == Macro->getNumArgs()-1 && // is __VA_ARGS__
+ ActualArgs->isVarargsElidedUse() && // Argument elided.
+ !ResultToks.empty() && ResultToks.back().is(tok::comma)) {
+ // Never add a space, even if the comma, ##, or arg had a space.
+ NextTokGetsSpace = false;
+ // Remove the paste operator, report use of the extension.
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+ ResultToks.pop_back();
+ }
+ continue;
+ }
+
+ // If anything changed, install this as the new Tokens list.
+ if (MadeChange) {
+ assert(!OwnsTokens && "This would leak if we already own the token list");
+    // Allocate the new token list from the preprocessor's bump allocator.
+ NumTokens = ResultToks.size();
+ llvm::BumpPtrAllocator &Alloc = PP.getPreprocessorAllocator();
+ Token *Res =
+ static_cast<Token *>(Alloc.Allocate(sizeof(Token)*ResultToks.size(),
+ llvm::alignof<Token>()));
+ if (NumTokens)
+ memcpy(Res, &ResultToks[0], NumTokens*sizeof(Token));
+ Tokens = Res;
+
+ // The preprocessor bump pointer owns these tokens, not us.
+ OwnsTokens = false;
+ }
+}
+
+/// Lex - Lex and return a token from this macro stream.
+///
+void TokenLexer::Lex(Token &Tok) {
+ // Lexing off the end of the macro, pop this macro off the expansion stack.
+ if (isAtEnd()) {
+ // If this is a macro (not a token stream), mark the macro enabled now
+ // that it is no longer being expanded.
+ if (Macro) Macro->EnableMacro();
+
+    // Pop this context off the preprocessor's lexer stack and get the next
+ // token. This will delete "this" so remember the PP instance var.
+ Preprocessor &PPCache = PP;
+ if (PP.HandleEndOfTokenLexer(Tok))
+ return;
+
+ // HandleEndOfTokenLexer may not return a token. If it doesn't, lex
+ // whatever is next.
+ return PPCache.Lex(Tok);
+ }
+
+ // If this is the first token of the expanded result, we inherit spacing
+ // properties later.
+ bool isFirstToken = CurToken == 0;
+
+ // Get the next token to return.
+ Tok = Tokens[CurToken++];
+
+ bool TokenIsFromPaste = false;
+
+ // If this token is followed by a token paste (##) operator, paste the tokens!
+ if (!isAtEnd() && Tokens[CurToken].is(tok::hashhash)) {
+ if (PasteTokens(Tok)) {
+ // When handling the microsoft /##/ extension, the final token is
+ // returned by PasteTokens, not the pasted token.
+ return;
+ } else {
+ TokenIsFromPaste = true;
+ }
+ }
+
+  // The token's current location indicates where the token was lexed from.  We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if they came from
+ // InstantiationLoc. Pull this information together into a new SourceLocation
+ // that captures all of this.
+ if (InstantiateLocStart.isValid()) { // Don't do this for token streams.
+ SourceManager &SM = PP.getSourceManager();
+ Tok.setLocation(SM.createInstantiationLoc(Tok.getLocation(),
+ InstantiateLocStart,
+ InstantiateLocEnd,
+ Tok.getLength()));
+ }
+
+ // If this is the first token, set the lexical properties of the token to
+ // match the lexical properties of the macro identifier.
+ if (isFirstToken) {
+ Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
+ }
+
+ // Handle recursive expansion!
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != 0) {
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Tok.setKind(II->getTokenID());
+
+ // If this identifier was poisoned and from a paste, emit an error. This
+ // won't be handled by Preprocessor::HandleIdentifier because this is coming
+ // from a macro expansion.
+ if (II->isPoisoned() && TokenIsFromPaste) {
+ // We warn about __VA_ARGS__ with poisoning.
+ if (II->isStr("__VA_ARGS__"))
+ PP.Diag(Tok, diag::ext_pp_bad_vaargs_use);
+ else
+ PP.Diag(Tok, diag::err_pp_used_poisoned_id);
+ }
+
+ if (!DisableMacroExpansion && II->isHandleIdentifierCase())
+ PP.HandleIdentifier(Tok);
+ }
+
+ // Otherwise, return a normal token.
+}
+
+/// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
+/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
+/// are more ## after it, chomp them iteratively. Return the result as Tok.
+/// If this returns true, the caller should immediately return the token.
+bool TokenLexer::PasteTokens(Token &Tok) {
+ llvm::SmallVector<char, 128> Buffer;
+ const char *ResultTokStrPtr = 0;
+ do {
+ // Consume the ## operator.
+ SourceLocation PasteOpLoc = Tokens[CurToken].getLocation();
+ ++CurToken;
+ assert(!isAtEnd() && "No token on the RHS of a paste operator!");
+
+ // Get the RHS token.
+ const Token &RHS = Tokens[CurToken];
+
+ // Allocate space for the result token. This is guaranteed to be enough for
+ // the two tokens.
+ Buffer.resize(Tok.getLength() + RHS.getLength());
+
+ // Get the spelling of the LHS token in Buffer.
+ const char *BufPtr = &Buffer[0];
+ unsigned LHSLen = PP.getSpelling(Tok, BufPtr);
+ if (BufPtr != &Buffer[0]) // Really, we want the chars in Buffer!
+ memcpy(&Buffer[0], BufPtr, LHSLen);
+
+ BufPtr = &Buffer[LHSLen];
+ unsigned RHSLen = PP.getSpelling(RHS, BufPtr);
+ if (BufPtr != &Buffer[LHSLen]) // Really, we want the chars in Buffer!
+ memcpy(&Buffer[LHSLen], BufPtr, RHSLen);
+
+ // Trim excess space.
+ Buffer.resize(LHSLen+RHSLen);
+
+ // Plop the pasted result (including the trailing newline and null) into a
+ // scratch buffer where we can lex it.
+ Token ResultTokTmp;
+ ResultTokTmp.startToken();
+
+ // Claim that the tmp token is a string_literal so that we can get the
+ // character pointer back from CreateString.
+ ResultTokTmp.setKind(tok::string_literal);
+ PP.CreateString(&Buffer[0], Buffer.size(), ResultTokTmp);
+ SourceLocation ResultTokLoc = ResultTokTmp.getLocation();
+ ResultTokStrPtr = ResultTokTmp.getLiteralData();
+
+ // Lex the resultant pasted token into Result.
+ Token Result;
+
+ if (Tok.is(tok::identifier) && RHS.is(tok::identifier)) {
+ // Common paste case: identifier+identifier = identifier. Avoid creating
+ // a lexer and other overhead.
+ PP.IncrementPasteCounter(true);
+ Result.startToken();
+ Result.setKind(tok::identifier);
+ Result.setLocation(ResultTokLoc);
+ Result.setLength(LHSLen+RHSLen);
+ } else {
+ PP.IncrementPasteCounter(false);
+
+ assert(ResultTokLoc.isFileID() &&
+ "Should be a raw location into scratch buffer");
+ SourceManager &SourceMgr = PP.getSourceManager();
+ FileID LocFileID = SourceMgr.getFileID(ResultTokLoc);
+
+ const char *ScratchBufStart = SourceMgr.getBufferData(LocFileID).first;
+
+ // Make a lexer to lex this string from. Lex just this one token.
+ // Make a lexer object so that we lex and expand the paste result.
+ Lexer TL(SourceMgr.getLocForStartOfFile(LocFileID),
+ PP.getLangOptions(), ScratchBufStart,
+ ResultTokStrPtr, ResultTokStrPtr+LHSLen+RHSLen);
+
+ // Lex a token in raw mode. This way it won't look up identifiers
+ // automatically, lexing off the end will return an eof token, and
+ // warnings are disabled. This returns true if the result token is the
+ // entire buffer.
+ bool isInvalid = !TL.LexFromRawLexer(Result);
+
+ // If we got an EOF token, we didn't form even ONE token. For example, we
+ // did "/ ## /" to get "//".
+ isInvalid |= Result.is(tok::eof);
+
+ // If pasting the two tokens didn't form a full new token, this is an
+ // error. This occurs with "x ## +" and other stuff. Return with Tok
+ // unmodified and with RHS as the next token to lex.
+ if (isInvalid) {
+ // Test for the Microsoft extension of /##/ turning into // here on the
+ // error path.
+ if (PP.getLangOptions().Microsoft && Tok.is(tok::slash) &&
+ RHS.is(tok::slash)) {
+ HandleMicrosoftCommentPaste(Tok);
+ return true;
+ }
+
+ // Do not emit the warning when preprocessing assembler code.
+ if (!PP.getLangOptions().AsmPreprocessor) {
+ // Explicitly convert the token location to have proper instantiation
+ // information so that the user knows where it came from.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation Loc =
+ SM.createInstantiationLoc(PasteOpLoc, InstantiateLocStart,
+ InstantiateLocEnd, 2);
+ PP.Diag(Loc, diag::err_pp_bad_paste)
+ << std::string(Buffer.begin(), Buffer.end());
+ }
+
+ // Do not consume the RHS.
+ --CurToken;
+ }
+
+ // Turn ## into 'unknown' to avoid # ## # from looking like a paste
+ // operator.
+ if (Result.is(tok::hashhash))
+ Result.setKind(tok::unknown);
+ }
+
+    // Transfer properties of the LHS over to the Result.
+ Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
+ Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
+
+ // Finally, replace LHS with the result, consume the RHS, and iterate.
+ ++CurToken;
+ Tok = Result;
+ } while (!isAtEnd() && Tokens[CurToken].is(tok::hashhash));
+
+ // Now that we got the result token, it will be subject to expansion. Since
+ // token pasting re-lexes the result token in raw mode, identifier information
+ // isn't looked up. As such, if the result is an identifier, look up id info.
+ if (Tok.is(tok::identifier)) {
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ IdentifierInfo *II = PP.LookUpIdentifierInfo(Tok, ResultTokStrPtr);
+ Tok.setIdentifierInfo(II);
+ }
+ return false;
+}
+
+/// isNextTokenLParen - If the next token lexed will pop this macro off the
+/// expansion stack, return 2. If the next unexpanded token is a '(', return
+/// 1, otherwise return 0.
+unsigned TokenLexer::isNextTokenLParen() const {
+ // Out of tokens?
+ if (isAtEnd())
+ return 2;
+ return Tokens[CurToken].is(tok::l_paren);
+}
+
+
+/// HandleMicrosoftCommentPaste - In Microsoft compatibility mode, /##/ pastes
+/// together to form a comment that comments out everything in the current
+/// macro, other active macros, and anything left on the current physical
+/// source line of the instantiated buffer. Handle this by returning the
+/// first token on the next line.
+void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok) {
+ // We 'comment out' the rest of this macro by just ignoring the rest of the
+ // tokens that have not been lexed yet, if any.
+
+ // Since this must be a macro, mark the macro enabled now that it is no longer
+ // being expanded.
+ assert(Macro && "Token streams can't paste comments");
+ Macro->EnableMacro();
+
+ PP.HandleMicrosoftCommentPaste(Tok);
+}
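[Annotation, not part of the patch] Worked example tying ExpandFunctionArguments() and PasteTokens() together: given

    #define GLUE(a, b)  a ## b
    GLUE(foo, 1)

ExpandFunctionArguments() substitutes the unexpanded argument tokens around the ## (they are deliberately not pre-expanded, per C99 6.10.3.1p1), and Lex() sees the following hashhash and calls PasteTokens(), which spells 'foo' and '1' into a scratch buffer, re-lexes them in raw mode, and produces the single identifier 'foo1'. If the paste does not form a single token, e.g. GLUE(foo, +), the paste is diagnosed with err_pp_bad_paste (unless preprocessing assembler code) and the RHS is left to be lexed as the next token.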
diff --git a/lib/Makefile b/lib/Makefile
new file mode 100755
index 0000000..50ed94a
--- /dev/null
+++ b/lib/Makefile
@@ -0,0 +1,15 @@
+##===- lib/Makefile ----------------------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+LEVEL = ../../..
+
+PARALLEL_DIRS = Headers Basic Lex Parse AST Sema CodeGen Analysis Rewrite \
+ Frontend Driver
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Parse/AttributeList.cpp b/lib/Parse/AttributeList.cpp
new file mode 100644
index 0000000..0170a06
--- /dev/null
+++ b/lib/Parse/AttributeList.cpp
@@ -0,0 +1,145 @@
+//===--- AttributeList.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AttributeList class implementation
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/AttributeList.h"
+#include "clang/Basic/IdentifierTable.h"
+using namespace clang;
+
+AttributeList::AttributeList(IdentifierInfo *aName, SourceLocation aLoc,
+ IdentifierInfo *pName, SourceLocation pLoc,
+ ActionBase::ExprTy **ExprList, unsigned numArgs,
+ AttributeList *n)
+ : AttrName(aName), AttrLoc(aLoc), ParmName(pName), ParmLoc(pLoc),
+ NumArgs(numArgs), Next(n) {
+
+ if (numArgs == 0)
+ Args = 0;
+ else {
+ Args = new ActionBase::ExprTy*[numArgs];
+ memcpy(Args, ExprList, numArgs*sizeof(Args[0]));
+ }
+}
+
+AttributeList::~AttributeList() {
+ if (Args) {
+ // FIXME: before we delete the vector, we need to make sure the Expr's
+ // have been deleted. Since ActionBase::ExprTy is "void", we are dependent
+ // on the actions module for actually freeing the memory. The specific
+ // hooks are ActOnDeclarator, ActOnTypeName, ActOnParamDeclaratorType,
+ // ParseField, ParseTag. Once these routines have freed the expression,
+ // they should zero out the Args slot (to indicate the memory has been
+ // freed). If any element of the vector is non-null, we should assert.
+ delete [] Args;
+ }
+ delete Next;
+}
+
+AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
+ const char *Str = Name->getName();
+ unsigned Len = Name->getLength();
+
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Len > 4 && Str[0] == '_' && Str[1] == '_' &&
+ Str[Len - 2] == '_' && Str[Len - 1] == '_') {
+ Str += 2;
+ Len -= 4;
+ }
+
+ // FIXME: Hand generating this is neither smart nor efficient.
+ switch (Len) {
+ case 4:
+ if (!memcmp(Str, "weak", 4)) return AT_weak;
+ if (!memcmp(Str, "pure", 4)) return AT_pure;
+ if (!memcmp(Str, "mode", 4)) return AT_mode;
+ if (!memcmp(Str, "used", 4)) return AT_used;
+ break;
+ case 5:
+ if (!memcmp(Str, "alias", 5)) return AT_alias;
+ if (!memcmp(Str, "const", 5)) return AT_const;
+ break;
+ case 6:
+ if (!memcmp(Str, "packed", 6)) return AT_packed;
+ if (!memcmp(Str, "malloc", 6)) return IgnoredAttribute; // FIXME: noalias.
+ if (!memcmp(Str, "format", 6)) return AT_format;
+ if (!memcmp(Str, "unused", 6)) return AT_unused;
+ if (!memcmp(Str, "blocks", 6)) return AT_blocks;
+ break;
+ case 7:
+ if (!memcmp(Str, "aligned", 7)) return AT_aligned;
+ if (!memcmp(Str, "cleanup", 7)) return AT_cleanup;
+ if (!memcmp(Str, "nodebug", 7)) return AT_nodebug;
+ if (!memcmp(Str, "nonnull", 7)) return AT_nonnull;
+ if (!memcmp(Str, "nothrow", 7)) return AT_nothrow;
+ if (!memcmp(Str, "objc_gc", 7)) return AT_objc_gc;
+ if (!memcmp(Str, "regparm", 7)) return AT_regparm;
+ if (!memcmp(Str, "section", 7)) return AT_section;
+ if (!memcmp(Str, "stdcall", 7)) return AT_stdcall;
+ break;
+ case 8:
+ if (!memcmp(Str, "annotate", 8)) return AT_annotate;
+ if (!memcmp(Str, "noreturn", 8)) return AT_noreturn;
+ if (!memcmp(Str, "noinline", 8)) return AT_noinline;
+ if (!memcmp(Str, "fastcall", 8)) return AT_fastcall;
+ if (!memcmp(Str, "iboutlet", 8)) return AT_IBOutlet;
+ if (!memcmp(Str, "sentinel", 8)) return AT_sentinel;
+ if (!memcmp(Str, "NSObject", 8)) return AT_nsobject;
+ break;
+ case 9:
+ if (!memcmp(Str, "dllimport", 9)) return AT_dllimport;
+ if (!memcmp(Str, "dllexport", 9)) return AT_dllexport;
+ if (!memcmp(Str, "may_alias", 9)) return IgnoredAttribute; // FIXME: TBAA
+ break;
+ case 10:
+ if (!memcmp(Str, "deprecated", 10)) return AT_deprecated;
+ if (!memcmp(Str, "visibility", 10)) return AT_visibility;
+ if (!memcmp(Str, "destructor", 10)) return AT_destructor;
+ if (!memcmp(Str, "format_arg", 10)) return AT_format_arg;
+ if (!memcmp(Str, "gnu_inline", 10)) return AT_gnu_inline;
+ break;
+ case 11:
+ if (!memcmp(Str, "weak_import", 11)) return AT_weak_import;
+ if (!memcmp(Str, "vector_size", 11)) return AT_vector_size;
+ if (!memcmp(Str, "constructor", 11)) return AT_constructor;
+ if (!memcmp(Str, "unavailable", 11)) return AT_unavailable;
+ break;
+ case 12:
+ if (!memcmp(Str, "overloadable", 12)) return AT_overloadable;
+ break;
+ case 13:
+ if (!memcmp(Str, "address_space", 13)) return AT_address_space;
+ if (!memcmp(Str, "always_inline", 13)) return AT_always_inline;
+ break;
+ case 14:
+ if (!memcmp(Str, "objc_exception", 14)) return AT_objc_exception;
+ break;
+ case 15:
+ if (!memcmp(Str, "ext_vector_type", 15)) return AT_ext_vector_type;
+ break;
+ case 17:
+ if (!memcmp(Str, "transparent_union", 17)) return AT_transparent_union;
+ if (!memcmp(Str, "analyzer_noreturn", 17)) return AT_analyzer_noreturn;
+ break;
+ case 18:
+ if (!memcmp(Str, "warn_unused_result", 18)) return AT_warn_unused_result;
+ break;
+ case 19:
+ if (!memcmp(Str, "ns_returns_retained", 19)) return AT_ns_returns_retained;
+ if (!memcmp(Str, "cf_returns_retained", 19)) return AT_cf_returns_retained;
+ break;
+ case 22:
+ if (!memcmp(Str, "no_instrument_function", 22))
+ return AT_no_instrument_function;
+ break;
+ }
+ return UnknownAttribute;
+}
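[Annotation, not part of the patch] The normalization above only strips the fully wrapped spelling, so for example:

    getKind(II("packed"))      // AT_packed
    getKind(II("__packed__"))  // AT_packed, leading and trailing "__" stripped
    getKind(II("__packed"))    // UnknownAttribute: only one side is wrapped,
                               // and nothing matches at length 8

Here II(...) is shorthand for whichever IdentifierInfo the parser looked up for the attribute name.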
diff --git a/lib/Parse/CMakeLists.txt b/lib/Parse/CMakeLists.txt
new file mode 100644
index 0000000..8fb7cd2
--- /dev/null
+++ b/lib/Parse/CMakeLists.txt
@@ -0,0 +1,21 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangParse
+ AttributeList.cpp
+ DeclSpec.cpp
+ MinimalAction.cpp
+ ParseCXXInlineMethods.cpp
+ ParseDecl.cpp
+ ParseDeclCXX.cpp
+ ParseExpr.cpp
+ ParseExprCXX.cpp
+ ParseInit.cpp
+ ParseObjc.cpp
+ ParsePragma.cpp
+ Parser.cpp
+ ParseStmt.cpp
+ ParseTentative.cpp
+ ParseTemplate.cpp
+ )
+
+add_dependencies(clangParse ClangDiagnosticParse)
diff --git a/lib/Parse/DeclSpec.cpp b/lib/Parse/DeclSpec.cpp
new file mode 100644
index 0000000..d8c6986
--- /dev/null
+++ b/lib/Parse/DeclSpec.cpp
@@ -0,0 +1,395 @@
+//===--- DeclSpec.cpp - Declaration Specifier Semantic Analysis -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declaration specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/STLExtras.h"
+#include <cstring>
+using namespace clang;
+
+
+static DiagnosticBuilder Diag(Diagnostic &D, SourceLocation Loc,
+ SourceManager &SrcMgr, unsigned DiagID) {
+ return D.Report(FullSourceLoc(Loc, SrcMgr), DiagID);
+}
+
+/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
+/// "TheDeclarator" is the declarator that this will be added to.
+DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
+ SourceLocation EllipsisLoc,
+ ParamInfo *ArgInfo,
+ unsigned NumArgs,
+ unsigned TypeQuals,
+ bool hasExceptionSpec,
+ SourceLocation ThrowLoc,
+ bool hasAnyExceptionSpec,
+ ActionBase::TypeTy **Exceptions,
+ SourceRange *ExceptionRanges,
+ unsigned NumExceptions,
+ SourceLocation Loc,
+ Declarator &TheDeclarator) {
+ DeclaratorChunk I;
+ I.Kind = Function;
+ I.Loc = Loc;
+ I.Fun.hasPrototype = hasProto;
+ I.Fun.isVariadic = isVariadic;
+ I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ I.Fun.DeleteArgInfo = false;
+ I.Fun.TypeQuals = TypeQuals;
+ I.Fun.NumArgs = NumArgs;
+ I.Fun.ArgInfo = 0;
+ I.Fun.hasExceptionSpec = hasExceptionSpec;
+ I.Fun.ThrowLoc = ThrowLoc.getRawEncoding();
+ I.Fun.hasAnyExceptionSpec = hasAnyExceptionSpec;
+ I.Fun.NumExceptions = NumExceptions;
+ I.Fun.Exceptions = 0;
+
+ // new[] an argument array if needed.
+ if (NumArgs) {
+ // If the 'InlineParams' in Declarator is unused and big enough, put our
+ // parameter list there (in an effort to avoid new/delete traffic). If it
+ // is already used (consider a function returning a function pointer) or too
+ // small (function taking too many arguments), go to the heap.
+ if (!TheDeclarator.InlineParamsUsed &&
+ NumArgs <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
+ I.Fun.ArgInfo = TheDeclarator.InlineParams;
+ I.Fun.DeleteArgInfo = false;
+ TheDeclarator.InlineParamsUsed = true;
+ } else {
+ I.Fun.ArgInfo = new DeclaratorChunk::ParamInfo[NumArgs];
+ I.Fun.DeleteArgInfo = true;
+ }
+ memcpy(I.Fun.ArgInfo, ArgInfo, sizeof(ArgInfo[0])*NumArgs);
+ }
+ // new[] an exception array if needed
+ if (NumExceptions) {
+ I.Fun.Exceptions = new DeclaratorChunk::TypeAndRange[NumExceptions];
+ for (unsigned i = 0; i != NumExceptions; ++i) {
+ I.Fun.Exceptions[i].Ty = Exceptions[i];
+ I.Fun.Exceptions[i].Range = ExceptionRanges[i];
+ }
+ }
+ return I;
+}
+
+/// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
+/// declaration specifier includes.
+///
+unsigned DeclSpec::getParsedSpecifiers() const {
+ unsigned Res = 0;
+ if (StorageClassSpec != SCS_unspecified ||
+ SCS_thread_specified)
+ Res |= PQ_StorageClassSpecifier;
+
+ if (TypeQualifiers != TQ_unspecified)
+ Res |= PQ_TypeQualifier;
+
+ if (hasTypeSpecifier())
+ Res |= PQ_TypeSpecifier;
+
+ if (FS_inline_specified || FS_virtual_specified || FS_explicit_specified)
+ Res |= PQ_FunctionSpecifier;
+ return Res;
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::SCS S) {
+ switch (S) {
+ default: assert(0 && "Unknown typespec!");
+ case DeclSpec::SCS_unspecified: return "unspecified";
+ case DeclSpec::SCS_typedef: return "typedef";
+ case DeclSpec::SCS_extern: return "extern";
+ case DeclSpec::SCS_static: return "static";
+ case DeclSpec::SCS_auto: return "auto";
+ case DeclSpec::SCS_register: return "register";
+ case DeclSpec::SCS_private_extern: return "__private_extern__";
+ case DeclSpec::SCS_mutable: return "mutable";
+ }
+}
+
+bool DeclSpec::BadSpecifier(SCS S, const char *&PrevSpec) {
+ PrevSpec = getSpecifierName(S);
+ return true;
+}
+
+bool DeclSpec::BadSpecifier(TSW W, const char *&PrevSpec) {
+ switch (W) {
+ case TSW_unspecified: PrevSpec = "unspecified"; break;
+ case TSW_short: PrevSpec = "short"; break;
+ case TSW_long: PrevSpec = "long"; break;
+ case TSW_longlong: PrevSpec = "long long"; break;
+ }
+ return true;
+}
+
+bool DeclSpec::BadSpecifier(TSC C, const char *&PrevSpec) {
+ switch (C) {
+ case TSC_unspecified: PrevSpec = "unspecified"; break;
+ case TSC_imaginary: PrevSpec = "imaginary"; break;
+ case TSC_complex: PrevSpec = "complex"; break;
+ }
+ return true;
+}
+
+
+bool DeclSpec::BadSpecifier(TSS S, const char *&PrevSpec) {
+ switch (S) {
+ case TSS_unspecified: PrevSpec = "unspecified"; break;
+ case TSS_signed: PrevSpec = "signed"; break;
+ case TSS_unsigned: PrevSpec = "unsigned"; break;
+ }
+ return true;
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::TST T) {
+ switch (T) {
+ default: assert(0 && "Unknown typespec!");
+ case DeclSpec::TST_unspecified: return "unspecified";
+ case DeclSpec::TST_void: return "void";
+ case DeclSpec::TST_char: return "char";
+ case DeclSpec::TST_wchar: return "wchar_t";
+ case DeclSpec::TST_int: return "int";
+ case DeclSpec::TST_float: return "float";
+ case DeclSpec::TST_double: return "double";
+ case DeclSpec::TST_bool: return "_Bool";
+ case DeclSpec::TST_decimal32: return "_Decimal32";
+ case DeclSpec::TST_decimal64: return "_Decimal64";
+ case DeclSpec::TST_decimal128: return "_Decimal128";
+ case DeclSpec::TST_enum: return "enum";
+ case DeclSpec::TST_class: return "class";
+ case DeclSpec::TST_union: return "union";
+ case DeclSpec::TST_struct: return "struct";
+ case DeclSpec::TST_typename: return "type-name";
+ case DeclSpec::TST_typeofType:
+ case DeclSpec::TST_typeofExpr: return "typeof";
+ }
+}
+
+bool DeclSpec::BadSpecifier(TST T, const char *&PrevSpec) {
+ PrevSpec = getSpecifierName(T);
+ return true;
+}
+
+bool DeclSpec::BadSpecifier(TQ T, const char *&PrevSpec) {
+ switch (T) {
+ case DeclSpec::TQ_unspecified: PrevSpec = "unspecified"; break;
+ case DeclSpec::TQ_const: PrevSpec = "const"; break;
+ case DeclSpec::TQ_restrict: PrevSpec = "restrict"; break;
+ case DeclSpec::TQ_volatile: PrevSpec = "volatile"; break;
+ }
+ return true;
+}
+
+bool DeclSpec::SetStorageClassSpec(SCS S, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (StorageClassSpec != SCS_unspecified)
+ return BadSpecifier((SCS)StorageClassSpec, PrevSpec);
+ StorageClassSpec = S;
+ StorageClassSpecLoc = Loc;
+ assert((unsigned)S == StorageClassSpec && "SCS constants overflow bitfield");
+ return false;
+}
+
+bool DeclSpec::SetStorageClassSpecThread(SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (SCS_thread_specified) {
+ PrevSpec = "__thread";
+ return true;
+ }
+ SCS_thread_specified = true;
+ SCS_threadLoc = Loc;
+ return false;
+}
+
+
+/// These methods set the specified attribute of the DeclSpec, but return true
+/// and ignore the request if invalid (e.g. "extern" then "auto" is
+/// specified).
+bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (TypeSpecWidth != TSW_unspecified &&
+ // Allow turning long -> long long.
+ (W != TSW_longlong || TypeSpecWidth != TSW_long))
+ return BadSpecifier((TSW)TypeSpecWidth, PrevSpec);
+ TypeSpecWidth = W;
+ TSWLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecComplex(TSC C, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (TypeSpecComplex != TSC_unspecified)
+ return BadSpecifier((TSC)TypeSpecComplex, PrevSpec);
+ TypeSpecComplex = C;
+ TSCLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecSign(TSS S, SourceLocation Loc,
+ const char *&PrevSpec) {
+ if (TypeSpecSign != TSS_unspecified)
+ return BadSpecifier((TSS)TypeSpecSign, PrevSpec);
+ TypeSpecSign = S;
+ TSSLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec, void *Rep,
+ bool Owned) {
+ if (TypeSpecType != TST_unspecified)
+ return BadSpecifier((TST)TypeSpecType, PrevSpec);
+ TypeSpecType = T;
+ TypeRep = Rep;
+ TSTLoc = Loc;
+ TypeSpecOwned = Owned;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecError() {
+ TypeSpecType = TST_error;
+ TypeRep = 0;
+ TSTLoc = SourceLocation();
+ return false;
+}
+
+bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
+ const LangOptions &Lang) {
+ // Duplicates turn into warnings pre-C99.
+ if ((TypeQualifiers & T) && !Lang.C99)
+ return BadSpecifier(T, PrevSpec);
+ TypeQualifiers |= T;
+
+ switch (T) {
+ default: assert(0 && "Unknown type qualifier!");
+ case TQ_const: TQ_constLoc = Loc; break;
+ case TQ_restrict: TQ_restrictLoc = Loc; break;
+ case TQ_volatile: TQ_volatileLoc = Loc; break;
+ }
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec){
+ // 'inline inline' is ok.
+ FS_inline_specified = true;
+ FS_inlineLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecVirtual(SourceLocation Loc, const char *&PrevSpec){
+ // 'virtual virtual' is ok.
+ FS_virtual_specified = true;
+ FS_virtualLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecExplicit(SourceLocation Loc, const char *&PrevSpec){
+ // 'explicit explicit' is ok.
+ FS_explicit_specified = true;
+ FS_explicitLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec) {
+ if (Friend_specified) {
+ PrevSpec = "friend";
+ return true;
+ }
+
+ Friend_specified = true;
+ FriendLoc = Loc;
+ return false;
+}
+
+/// Finish - This does final analysis of the declspec, rejecting things like
+/// "_Imaginary" (lacking an FP type) and issuing any needed diagnostics
+/// through the given Diagnostic object. After calling this method, DeclSpec
+/// is guaranteed self-consistent, even if an error occurred.
+void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
+ // Check the type specifier components first.
+ SourceManager &SrcMgr = PP.getSourceManager();
+
+ // signed/unsigned are only valid with int/char/wchar_t.
+ if (TypeSpecSign != TSS_unspecified) {
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
+ else if (TypeSpecType != TST_int &&
+ TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
+ Diag(D, TSSLoc, SrcMgr, diag::err_invalid_sign_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ // signed double -> double.
+ TypeSpecSign = TSS_unspecified;
+ }
+ }
+
+ // Validate the width of the type.
+ switch (TypeSpecWidth) {
+ case TSW_unspecified: break;
+ case TSW_short: // short int
+ case TSW_longlong: // long long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // short -> short int, long long -> long long int.
+ else if (TypeSpecType != TST_int) {
+ Diag(D, TSWLoc, SrcMgr,
+ TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec
+ : diag::err_invalid_longlong_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecType = TST_int;
+ }
+ break;
+ case TSW_long: // long double, long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // long -> long int.
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
+ Diag(D, TSWLoc, SrcMgr, diag::err_invalid_long_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecType = TST_int;
+ }
+ break;
+ }
+
+ // TODO: if the implementation does not implement _Complex or _Imaginary,
+ // disallow their use. Need information about the backend.
+ if (TypeSpecComplex != TSC_unspecified) {
+ if (TypeSpecType == TST_unspecified) {
+ Diag(D, TSCLoc, SrcMgr, diag::ext_plain_complex)
+ << CodeModificationHint::CreateInsertion(
+ PP.getLocForEndOfToken(getTypeSpecComplexLoc()),
+ " double");
+ TypeSpecType = TST_double; // _Complex -> _Complex double.
+ } else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
+ // Note that this intentionally doesn't include _Complex _Bool.
+ Diag(D, TSTLoc, SrcMgr, diag::ext_integer_complex);
+ } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
+ Diag(D, TSCLoc, SrcMgr, diag::err_invalid_complex_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecComplex = TSC_unspecified;
+ }
+ }
+
+ // Okay, now we can infer the real type.
+
+ // TODO: return "auto function" and other bad things based on the real type.
+
+ // 'data definition has no type or storage class'?
+}
+
+bool DeclSpec::isMissingDeclaratorOk() {
+ TST tst = getTypeSpecType();
+ return (tst == TST_union
+ || tst == TST_struct
+ || tst == TST_class
+ || tst == TST_enum
+ ) && getTypeRep() != 0 && StorageClassSpec != DeclSpec::SCS_typedef;
+}
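+
+// A sketch of how Finish() canonicalizes partial specifiers (derived from the
+// rules above; the declarations themselves are illustrative only):
+//
+//   unsigned x;       // sign with no type       -> unsigned int
+//   short y;          // width with no type      -> short int
+//   long long z;      // width with no type      -> long long int
+//   _Complex c;       // extension               -> _Complex double
+//   signed double d;  // error; sign is dropped  -> double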
diff --git a/lib/Parse/ExtensionRAIIObject.h b/lib/Parse/ExtensionRAIIObject.h
new file mode 100644
index 0000000..2b2bd3b
--- /dev/null
+++ b/lib/Parse/ExtensionRAIIObject.h
@@ -0,0 +1,40 @@
+//===--- ExtensionRAIIObject.h - Use RAII for __extension__ -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements the ExtensionRAIIObject class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_EXTENSION_RAII_OBJECT_H
+#define LLVM_CLANG_PARSE_EXTENSION_RAII_OBJECT_H
+
+#include "clang/Parse/ParseDiagnostic.h"
+
+namespace clang {
+
+ /// ExtensionRAIIObject - This saves the state of extension warnings when
+ /// constructed and disables them. When destructed, it restores them back to
+ /// the way they used to be. This is used to handle __extension__ in the
+ /// parser.
+ class ExtensionRAIIObject {
+ void operator=(const ExtensionRAIIObject &); // DO NOT IMPLEMENT
+ ExtensionRAIIObject(const ExtensionRAIIObject&); // DO NOT IMPLEMENT
+ Diagnostic &Diags;
+ public:
+ ExtensionRAIIObject(Diagnostic &diags) : Diags(diags) {
+ Diags.IncrementAllExtensionsSilenced();
+ }
+
+ ~ExtensionRAIIObject() {
+ Diags.DecrementAllExtensionsSilenced();
+ }
+ };
+}
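+
+// Typical use (a minimal sketch; the enclosing parser code and the 'Diags'
+// member it reads are assumptions for illustration):
+//
+//   {
+//     ExtensionRAIIObject O(Diags);   // extension warnings silenced here
+//     /* ...parse the __extension__ operand... */
+//   }                                 // destructor restores the old state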
+
+#endif
diff --git a/lib/Parse/Makefile b/lib/Parse/Makefile
new file mode 100644
index 0000000..5d69029
--- /dev/null
+++ b/lib/Parse/Makefile
@@ -0,0 +1,22 @@
+##===- clang/lib/Parse/Makefile ----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the Parser library for the C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangParse
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Parse/MinimalAction.cpp b/lib/Parse/MinimalAction.cpp
new file mode 100644
index 0000000..b018e36
--- /dev/null
+++ b/lib/Parse/MinimalAction.cpp
@@ -0,0 +1,225 @@
+//===--- MinimalAction.cpp - Implement the MinimalAction class ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MinimalAction interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+/// Out-of-line virtual destructor to provide home for ActionBase class.
+ActionBase::~ActionBase() {}
+
+/// Out-of-line virtual destructor to provide home for Action class.
+Action::~Action() {}
+
+// Defined out-of-line here because of dependency on AttributeList
+Action::DeclPtrTy Action::ActOnUsingDirective(Scope *CurScope,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList) {
+
+ // FIXME: The parser seems to assume that Action::ActOn* takes ownership of
+ // the passed AttributeList, but other actions don't free it. Is this
+ // temporary state or a bug?
+ delete AttrList;
+ return DeclPtrTy();
+}
+
+
+void PrettyStackTraceActionsDecl::print(llvm::raw_ostream &OS) const {
+ if (Loc.isValid()) {
+ Loc.print(OS, SM);
+ OS << ": ";
+ }
+ OS << Message;
+
+ std::string Name = Actions.getDeclName(TheDecl);
+ if (!Name.empty())
+ OS << " '" << Name << '\'';
+
+ OS << '\n';
+}
+
+/// TypeNameInfo - A link exists here for each scope in which an identifier is
+/// defined.
+namespace {
+ struct TypeNameInfo {
+ TypeNameInfo *Prev;
+ bool isTypeName;
+
+ TypeNameInfo(bool istypename, TypeNameInfo *prev) {
+ isTypeName = istypename;
+ Prev = prev;
+ }
+ };
+
+ struct TypeNameInfoTable {
+ llvm::RecyclingAllocator<llvm::BumpPtrAllocator, TypeNameInfo> Allocator;
+
+ void AddEntry(bool isTypename, IdentifierInfo *II) {
+ TypeNameInfo *TI = Allocator.Allocate<TypeNameInfo>();
+ new (TI) TypeNameInfo(isTypename, II->getFETokenInfo<TypeNameInfo>());
+ II->setFETokenInfo(TI);
+ }
+
+ void DeleteEntry(TypeNameInfo *Entry) {
+ Entry->~TypeNameInfo();
+ Allocator.Deallocate(Entry);
+ }
+ };
+}
+
+static TypeNameInfoTable *getTable(void *TP) {
+ return static_cast<TypeNameInfoTable*>(TP);
+}
+
+MinimalAction::MinimalAction(Preprocessor &pp)
+ : Idents(pp.getIdentifierTable()), PP(pp) {
+ TypeNameInfoTablePtr = new TypeNameInfoTable();
+}
+
+MinimalAction::~MinimalAction() {
+ delete getTable(TypeNameInfoTablePtr);
+}
+
+void MinimalAction::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) {
+ TUScope = S;
+
+ TypeNameInfoTable &TNIT = *getTable(TypeNameInfoTablePtr);
+
+ if (PP.getTargetInfo().getPointerWidth(0) >= 64) {
+ // Install [u]int128_t for 64-bit targets.
+ TNIT.AddEntry(true, &Idents.get("__int128_t"));
+ TNIT.AddEntry(true, &Idents.get("__uint128_t"));
+ }
+
+ if (PP.getLangOptions().ObjC1) {
+ // Recognize the ObjC built-in type identifiers as types.
+ TNIT.AddEntry(true, &Idents.get("id"));
+ TNIT.AddEntry(true, &Idents.get("SEL"));
+ TNIT.AddEntry(true, &Idents.get("Class"));
+ TNIT.AddEntry(true, &Idents.get("Protocol"));
+ }
+}
+
+/// getTypeName - This looks at the IdentifierInfo::FETokenInfo field to
+/// determine whether the name is a type name (Objective-C class name or
+/// typedef) in this scope, returning the type if so and 0 otherwise.
+///
+/// FIXME: Use the passed CXXScopeSpec for accurate C++ type checking.
+Action::TypeTy *
+MinimalAction::getTypeName(IdentifierInfo &II, SourceLocation Loc,
+ Scope *S, const CXXScopeSpec *SS) {
+ if (TypeNameInfo *TI = II.getFETokenInfo<TypeNameInfo>())
+ if (TI->isTypeName)
+ return TI;
+ return 0;
+}
+
+/// isCurrentClassName - Always returns false, because MinimalAction
+/// does not support C++ classes with constructors.
+bool MinimalAction::isCurrentClassName(const IdentifierInfo &, Scope *,
+ const CXXScopeSpec *) {
+ return false;
+}
+
+TemplateNameKind
+MinimalAction::isTemplateName(const IdentifierInfo &II, Scope *S,
+ TemplateTy &TemplateDecl,
+ const CXXScopeSpec *SS) {
+ return TNK_Non_template;
+}
+
+/// ActOnDeclarator - If this is a typedef declarator, we modify the
+/// IdentifierInfo::FETokenInfo field to keep track of this fact, until S is
+/// popped.
+Action::DeclPtrTy
+MinimalAction::ActOnDeclarator(Scope *S, Declarator &D) {
+ IdentifierInfo *II = D.getIdentifier();
+
+ // If there is no identifier associated with this declarator, bail out.
+ if (II == 0) return DeclPtrTy();
+
+ TypeNameInfo *weCurrentlyHaveTypeInfo = II->getFETokenInfo<TypeNameInfo>();
+ bool isTypeName =
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef;
+
+ // This check avoids creating TypeNameInfo objects for the common case.
+ // It does need to handle the uncommon case of shadowing a typedef name
+ // with a non-typedef name, e.g. { typedef int a; a xx; { int a; } }.
+ if (weCurrentlyHaveTypeInfo || isTypeName) {
+ // Allocate and add the 'TypeNameInfo' "decl".
+ getTable(TypeNameInfoTablePtr)->AddEntry(isTypeName, II);
+
+ // Remember that this needs to be removed when the scope is popped.
+ S->AddDecl(DeclPtrTy::make(II));
+ }
+ return DeclPtrTy();
+}
+
+Action::DeclPtrTy
+MinimalAction::ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtocols,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList) {
+ // Allocate and add the 'TypeNameInfo' "decl".
+ getTable(TypeNameInfoTablePtr)->AddEntry(true, ClassName);
+ return DeclPtrTy();
+}
+
+/// ActOnForwardClassDeclaration -
+/// Scope will always be top level file scope.
+Action::DeclPtrTy
+MinimalAction::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
+ IdentifierInfo **IdentList, unsigned NumElts) {
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Allocate and add the 'TypeNameInfo' "decl".
+ getTable(TypeNameInfoTablePtr)->AddEntry(true, IdentList[i]);
+
+ // Remember that this needs to be removed when the scope is popped.
+ TUScope->AddDecl(DeclPtrTy::make(IdentList[i]));
+ }
+ return DeclPtrTy();
+}
+
+/// ActOnPopScope - When a scope is popped, if any typedefs are now
+/// out-of-scope, they are removed from the IdentifierInfo::FETokenInfo field.
+void MinimalAction::ActOnPopScope(SourceLocation Loc, Scope *S) {
+ TypeNameInfoTable &Table = *getTable(TypeNameInfoTablePtr);
+
+ for (Scope::decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I != E; ++I) {
+ IdentifierInfo &II = *(*I).getAs<IdentifierInfo>();
+ TypeNameInfo *TI = II.getFETokenInfo<TypeNameInfo>();
+ assert(TI && "This decl didn't get pushed??");
+
+ if (TI) {
+ TypeNameInfo *Next = TI->Prev;
+ Table.DeleteEntry(TI);
+
+ II.setFETokenInfo(Next);
+ }
+ }
+}
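+
+// A sketch of the TypeNameInfo bookkeeping above, using the example from
+// ActOnDeclarator (illustration only):
+//
+//   { typedef int a;   // AddEntry(true, a): a's chain is [true]
+//     a xx;            // getTypeName() sees isTypeName, so 'a' is a type
+//     { int a;         // AddEntry(false, a): chain is [false] -> [true]
+//                      // here 'a' names the int variable, not the typedef
+//     }                // ActOnPopScope removes [false]; [true] is visible again
+//   }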
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
new file mode 100644
index 0000000..af6fab7
--- /dev/null
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -0,0 +1,271 @@
+//===--- ParseCXXInlineMethods.cpp - C++ class inline methods parsing------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing for C++ class inline methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+using namespace clang;
+
+/// ParseCXXInlineMethodDef - We parsed and verified that the specified
+/// Declarator is a well-formed C++ inline method definition. Now lex its body
+/// and store its tokens for parsing after the C++ class is complete.
+Parser::DeclPtrTy
+Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D) {
+ assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
+ "This isn't a function declarator!");
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try)) &&
+ "Current token not a '{', ':' or 'try'!");
+
+ DeclPtrTy FnD = Actions.ActOnCXXMemberDeclarator(CurScope, AS, D, 0, 0);
+
+ // Consume the tokens and store them for later parsing.
+
+ getCurrentClass().MethodDefs.push_back(LexedMethod(FnD));
+ CachedTokens &Toks = getCurrentClass().MethodDefs.back().Toks;
+
+ tok::TokenKind kind = Tok.getKind();
+ // We may have a constructor initializer or function-try-block here.
+ if (kind == tok::colon || kind == tok::kw_try) {
+ // Consume everything up to (and including) the left brace.
+ if (!ConsumeAndStoreUntil(tok::l_brace, tok::unknown, Toks, tok::semi)) {
+ // We didn't find the left-brace we expected after the
+ // constructor initializer.
+ if (Tok.is(tok::semi)) {
+ // We found a semicolon; complain, consume the semicolon, and
+ // don't try to parse this method later.
+ Diag(Tok.getLocation(), diag::err_expected_lbrace);
+ ConsumeAnyToken();
+ getCurrentClass().MethodDefs.pop_back();
+ return FnD;
+ }
+ }
+
+ } else {
+ // Begin by storing the '{' token.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ }
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, tok::unknown, Toks);
+
+ // If we're in a function-try-block, we need to store all the catch blocks.
+ if (kind == tok::kw_try) {
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, tok::unknown, Toks);
+ ConsumeAndStoreUntil(tok::r_brace, tok::unknown, Toks);
+ }
+ }
+
+ return FnD;
+}
+
+/// ParseLexedMethodDeclarations - We finished parsing the member
+/// specification of a top (non-nested) C++ class. Now go over the
+/// stack of method declarations with some parts for which parsing was
+/// delayed (such as default arguments) and parse them.
+void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate);
+
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+
+ for (; !Class.MethodDecls.empty(); Class.MethodDecls.pop_front()) {
+ LateParsedMethodDeclaration &LM = Class.MethodDecls.front();
+
+ // FIXME: For member function templates, we'll need to introduce a
+ // scope for the template parameters.
+
+ // Start the delayed C++ method declaration
+ Actions.ActOnStartDelayedCXXMethodDeclaration(CurScope, LM.Method);
+
+ // Introduce the parameters into scope and parse their default
+ // arguments.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+ for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
+ // Introduce the parameter into scope.
+ Actions.ActOnDelayedCXXMethodParameter(CurScope, LM.DefaultArgs[I].Param);
+
+ if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ // Parse the default argument from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // Consume the '='.
+ assert(Tok.is(tok::equal) && "Default argument not starting with '='");
+ SourceLocation EqualLoc = ConsumeToken();
+
+ OwningExprResult DefArgResult(ParseAssignmentExpression());
+ if (DefArgResult.isInvalid())
+ Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
+ else
+ Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc,
+ move(DefArgResult));
+ delete Toks;
+ LM.DefaultArgs[I].Toks = 0;
+ }
+ }
+ PrototypeScope.Exit();
+
+ // Finish the delayed C++ method declaration.
+ Actions.ActOnFinishDelayedCXXMethodDeclaration(CurScope, LM.Method);
+ }
+
+ for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
+ ParseLexedMethodDeclarations(*Class.NestedClasses[I]);
+}
+
+/// ParseLexedMethodDefs - We finished parsing the member specification of a top
+/// (non-nested) C++ class. Now go over the stack of lexed methods that were
+/// collected during its parsing and parse them all.
+void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate);
+
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+
+ for (; !Class.MethodDefs.empty(); Class.MethodDefs.pop_front()) {
+ LexedMethod &LM = Class.MethodDefs.front();
+
+ assert(!LM.Toks.empty() && "Empty body!");
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(&LM.Toks.front(), LM.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
+ && "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+ Actions.ActOnStartOfFunctionDef(CurScope, LM.D);
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LM.D);
+ continue;
+ }
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(LM.D);
+ // FIXME: What if ParseConstructorInitializer doesn't leave us with a '{'??
+ ParseFunctionStatementBody(LM.D);
+ }
+
+ for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
+ ParseLexedMethodDefs(*Class.NestedClasses[I]);
+}
+
+/// ConsumeAndStoreUntil - Consume and store tokens in the passed token
+/// container until the token 'T1' or 'T2' is reached (which gets
+/// consumed/stored too, if ConsumeFinalToken).
+/// If EarlyAbortIf is specified, then we will stop early if we find that
+/// token at the top level.
+/// Returns true if token 'T1' or 'T2' was found.
+/// NOTE: This is a specialized version of Parser::SkipUntil.
+bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
+ CachedTokens &Toks,
+ tok::TokenKind EarlyAbortIf,
+ bool ConsumeFinalToken) {
+ // We always want this function to consume at least one token if the first
+ // token isn't T1 or T2 and we are not at EOF.
+ bool isFirstTokenConsumed = true;
+ while (1) {
+ // If we found one of the tokens, stop and return true.
+ if (Tok.is(T1) || Tok.is(T2)) {
+ if (ConsumeFinalToken) {
+ Toks.push_back(Tok);
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+
+ // If we found the early-abort token, return.
+ if (Tok.is(EarlyAbortIf))
+ return false;
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Ran out of tokens.
+ return false;
+
+ case tok::l_paren:
+ // Recursively consume properly-nested parens.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ ConsumeAndStoreUntil(tok::r_paren, tok::unknown, Toks);
+ break;
+ case tok::l_square:
+ // Recursively consume properly-nested square brackets.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ ConsumeAndStoreUntil(tok::r_square, tok::unknown, Toks);
+ break;
+ case tok::l_brace:
+ // Recursively consume properly-nested braces.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ ConsumeAndStoreUntil(tok::r_brace, tok::unknown, Toks);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+ // already be handled), this isn't balanced. If there is a LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ Toks.push_back(Tok);
+ ConsumeStringToken();
+ break;
+ default:
+ // consume this token.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenConsumed = false;
+ }
+}
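+
+// Why the bodies are cached instead of parsed in place: members declared
+// later in the class must already be visible. A sketch (illustration only):
+//
+//   struct S {
+//     int f() { return x; }   // '{ return x; }' is stored as tokens here...
+//     int x;
+//   };                        // ...and replayed only once the class is
+//                             //    complete, so the use of 'x' resolves.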
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
new file mode 100644
index 0000000..39eaf36
--- /dev/null
+++ b/lib/Parse/ParseDecl.cpp
@@ -0,0 +1,2707 @@
+//===--- ParseDecl.cpp - Declaration Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Scope.h"
+#include "ExtensionRAIIObject.h"
+#include "llvm/ADT/SmallSet.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.7: Declarations.
+//===----------------------------------------------------------------------===//
+
+/// ParseTypeName
+/// type-name: [C99 6.7.6]
+/// specifier-qualifier-list abstract-declarator[opt]
+///
+/// Called type-id in C++.
+Action::TypeResult Parser::ParseTypeName(SourceRange *Range) {
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS;
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+ if (Range)
+ *Range = DeclaratorInfo.getSourceRange();
+
+ if (DeclaratorInfo.isInvalidType())
+ return true;
+
+ return Actions.ActOnTypeName(CurScope, DeclaratorInfo);
+}
+
+/// ParseAttributes - Parse a non-empty attributes list.
+///
+/// [GNU] attributes:
+/// attribute
+/// attributes attribute
+///
+/// [GNU] attribute:
+/// '__attribute__' '(' '(' attribute-list ')' ')'
+///
+/// [GNU] attribute-list:
+/// attrib
+/// attribute_list ',' attrib
+///
+/// [GNU] attrib:
+/// empty
+/// attrib-name
+/// attrib-name '(' identifier ')'
+/// attrib-name '(' identifier ',' nonempty-expr-list ')'
+/// attrib-name '(' argument-expression-list [C99 6.5.2] ')'
+///
+/// [GNU] attrib-name:
+/// identifier
+/// typespec
+/// typequal
+/// storageclass
+///
+/// FIXME: The GCC grammar/code for this construct implies we need two
+/// token lookahead. Comment from gcc: "If they start with an identifier
+/// which is followed by a comma or close parenthesis, then the arguments
+/// start with that identifier; otherwise they are an expression list."
+///
+/// At the moment, I am not doing 2 token lookahead. I am also unaware of
+/// any attributes that don't work (based on my limited testing). Most
+/// attributes are very simple in practice. Until we find a bug, I don't see
+/// a pressing need to implement the 2 token lookahead.
+AttributeList *Parser::ParseAttributes(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw___attribute) && "Not an attribute list!");
+
+ AttributeList *CurrAttr = 0;
+
+ while (Tok.is(tok::kw___attribute)) {
+ ConsumeToken();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "attribute")) {
+ SkipUntil(tok::r_paren, true); // skip until ) or ;
+ return CurrAttr;
+ }
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "(")) {
+ SkipUntil(tok::r_paren, true); // skip until ) or ;
+ return CurrAttr;
+ }
+ // Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
+ while (Tok.is(tok::identifier) || isDeclarationSpecifier() ||
+ Tok.is(tok::comma)) {
+
+ if (Tok.is(tok::comma)) {
+ // allows for empty/non-empty attributes. ((__vector_size__(16),,,,))
+ ConsumeToken();
+ continue;
+ }
+ // we have an identifier or declaration specifier (const, int, etc.)
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ // check if we have a "parameterized" attribute
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen(); // ignore the left paren loc for now
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *ParmName = Tok.getIdentifierInfo();
+ SourceLocation ParmLoc = ConsumeToken();
+
+ if (Tok.is(tok::r_paren)) {
+ // __attribute__(( mode(byte) ))
+ ConsumeParen(); // ignore the right paren loc for now
+ CurrAttr = new AttributeList(AttrName, AttrNameLoc,
+ ParmName, ParmLoc, 0, 0, CurrAttr);
+ } else if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ // __attribute__(( format(printf, 1, 2) ))
+ ExprVector ArgExprs(Actions);
+ bool ArgExprsOk = true;
+
+ // now parse the non-empty comma separated list of expressions
+ while (1) {
+ OwningExprResult ArgExpr(ParseAssignmentExpression());
+ if (ArgExpr.isInvalid()) {
+ ArgExprsOk = false;
+ SkipUntil(tok::r_paren);
+ break;
+ } else {
+ ArgExprs.push_back(ArgExpr.release());
+ }
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // Eat the comma, move to the next argument
+ }
+ if (ArgExprsOk && Tok.is(tok::r_paren)) {
+ ConsumeParen(); // ignore the right paren loc for now
+ CurrAttr = new AttributeList(AttrName, AttrNameLoc, ParmName,
+ ParmLoc, ArgExprs.take(), ArgExprs.size(), CurrAttr);
+ }
+ }
+ } else { // not an identifier
+ // parse a possibly empty comma separated list of expressions
+ if (Tok.is(tok::r_paren)) {
+ // __attribute__(( nonnull() ))
+ ConsumeParen(); // ignore the right paren loc for now
+ CurrAttr = new AttributeList(AttrName, AttrNameLoc,
+ 0, SourceLocation(), 0, 0, CurrAttr);
+ } else {
+ // __attribute__(( aligned(16) ))
+ ExprVector ArgExprs(Actions);
+ bool ArgExprsOk = true;
+
+ // now parse the list of expressions
+ while (1) {
+ OwningExprResult ArgExpr(ParseAssignmentExpression());
+ if (ArgExpr.isInvalid()) {
+ ArgExprsOk = false;
+ SkipUntil(tok::r_paren);
+ break;
+ } else {
+ ArgExprs.push_back(ArgExpr.release());
+ }
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // Eat the comma, move to the next argument
+ }
+ // Match the ')'.
+ if (ArgExprsOk && Tok.is(tok::r_paren)) {
+ ConsumeParen(); // ignore the right paren loc for now
+ CurrAttr = new AttributeList(AttrName, AttrNameLoc, 0,
+ SourceLocation(), ArgExprs.take(), ArgExprs.size(),
+ CurrAttr);
+ }
+ }
+ }
+ } else {
+ CurrAttr = new AttributeList(AttrName, AttrNameLoc,
+ 0, SourceLocation(), 0, 0, CurrAttr);
+ }
+ }
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+ SkipUntil(tok::r_paren, false);
+ SourceLocation Loc = Tok.getLocation();
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+ SkipUntil(tok::r_paren, false);
+ }
+ if (EndLoc)
+ *EndLoc = Loc;
+ }
+ return CurrAttr;
+}
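+
+// Forms accepted by the loop above, collected from the cases it handles
+// (the attribute names are the usual GNU ones, shown for illustration):
+//
+//   __attribute__((weak, alias("__f")))       // bare name and name with args
+//   __attribute__((mode(byte)))               // attrib-name '(' identifier ')'
+//   __attribute__((format(printf, 1, 2)))     // identifier ',' expression-list
+//   __attribute__((aligned(16)))              // expression list
+//   __attribute__((nonnull()))                // empty argument list
+//   __attribute__((__vector_size__(16),,,,))  // stray commas are skipped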
+
+/// FuzzyParseMicrosoftDeclSpec. When -fms-extensions is enabled, this
+/// routine is called to skip/ignore tokens that comprise the MS declspec.
+void Parser::FuzzyParseMicrosoftDeclSpec() {
+ assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
+ ConsumeToken();
+ if (Tok.is(tok::l_paren)) {
+ unsigned short savedParenCount = ParenCount;
+ do {
+ ConsumeAnyToken();
+ } while (ParenCount > savedParenCount && Tok.isNot(tok::eof));
+ }
+ return;
+}
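+
+// Example of what gets skipped under -fms-extensions (the particular
+// __declspec argument is illustrative):
+//
+//   __declspec(dllimport) int x;   // '(dllimport)' is consumed and ignored;
+//                                  // parsing resumes at 'int'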
+
+/// ParseDeclaration - Parse a full 'declaration', which consists of
+/// declaration-specifiers, some number of declarators, and a semicolon.
+/// 'Context' should be a Declarator::TheContext value. This returns the
+/// location of the semicolon in DeclEnd.
+///
+/// declaration: [C99 6.7]
+/// block-declaration ->
+/// simple-declaration
+/// others [FIXME]
+/// [C++] template-declaration
+/// [C++] namespace-definition
+/// [C++] using-directive
+/// [C++] using-declaration [TODO]
+/// [C++0x] static_assert-declaration
+/// others... [FIXME]
+///
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
+ SourceLocation &DeclEnd) {
+ DeclPtrTy SingleDecl;
+ switch (Tok.getKind()) {
+ case tok::kw_template:
+ case tok::kw_export:
+ SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd);
+ break;
+ case tok::kw_namespace:
+ SingleDecl = ParseNamespace(Context, DeclEnd);
+ break;
+ case tok::kw_using:
+ SingleDecl = ParseUsingDirectiveOrDeclaration(Context, DeclEnd);
+ break;
+ case tok::kw_static_assert:
+ SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
+ break;
+ default:
+ return ParseSimpleDeclaration(Context, DeclEnd);
+ }
+
+ // This routine returns a DeclGroup; if the thing we parsed only contains a
+ // single decl, convert it now.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [C90/C++] init-declarator-list ';' [TODO]
+/// [OMP] threadprivate-directive [TODO]
+///
+/// If RequireSemi is false, this does not check for a ';' at the end of the
+/// declaration.
+Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(unsigned Context,
+ SourceLocation &DeclEnd,
+ bool RequireSemi) {
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS);
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ Declarator DeclaratorInfo(DS, (Declarator::TheContext)Context);
+ ParseDeclarator(DeclaratorInfo);
+
+ DeclGroupPtrTy DG =
+ ParseInitDeclaratorListAfterFirstDeclarator(DeclaratorInfo);
+
+ DeclEnd = Tok.getLocation();
+
+ // If the client wants to check what comes after the declaration, just return
+ // immediately without checking anything!
+ if (!RequireSemi) return DG;
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ return DG;
+ }
+
+ Diag(Tok, diag::err_expected_semi_declation);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return DG;
+}
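+
+// The early-return path above handles free-standing declspecs; everything
+// else falls through to the init-declarator-list. For illustration:
+//
+//   struct S;          // "struct-or-union identifier;"  -> freestanding
+//   enum { X };        // enum definition, no declarator -> freestanding
+//   int X, *Y, Z();    // declarators                    -> declarator path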
+
+/// \brief Parse 'declaration' after parsing 'declaration-specifiers
+/// declarator'. This method parses the remainder of the declaration
+/// (including any attributes or initializer, among other things) and
+/// finalizes the declaration.
+///
+/// init-declarator: [C99 6.7]
+/// declarator
+/// declarator '=' initializer
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] '=' initializer
+/// [C++] declarator initializer[opt]
+///
+/// [C++] initializer:
+/// [C++] '=' initializer-clause
+/// [C++] '(' expression-list ')'
+/// [C++0x] '=' 'default' [TODO]
+/// [C++0x] '=' 'delete'
+///
+/// According to the standard grammar, =default and =delete are function
+/// definitions, but that definitely doesn't fit with the parser here.
+///
+Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D) {
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ OwningExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi, true, true);
+ return DeclPtrTy();
+ }
+
+ D.setAsmLabel(AsmLabel.release());
+ D.SetRangeEnd(Loc);
+ }
+
+ // If attributes are present, parse them.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ D.AddAttributes(AttrList, Loc);
+ }
+
+ // Inform the current actions module that we just parsed this declarator.
+ DeclPtrTy ThisDecl = Actions.ActOnDeclarator(CurScope, D);
+
+ // Parse declarator '=' initializer.
+ if (Tok.is(tok::equal)) {
+ ConsumeToken();
+ if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) {
+ SourceLocation DelLoc = ConsumeToken();
+ Actions.SetDeclDeleted(ThisDecl, DelLoc);
+ } else {
+ OwningExprResult Init(ParseInitializer());
+ if (Init.isInvalid()) {
+ SkipUntil(tok::semi, true, true);
+ return DeclPtrTy();
+ }
+ Actions.AddInitializerToDecl(ThisDecl, Actions.FullExpr(Init));
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ // Parse C++ direct initializer: '(' expression-list ')'
+ SourceLocation LParenLoc = ConsumeParen();
+ ExprVector Exprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (ParseExpressionList(Exprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ } else {
+ // Match the ')'.
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ assert(!Exprs.empty() && Exprs.size()-1 == CommaLocs.size() &&
+ "Unexpected number of commas!");
+ Actions.AddCXXDirectInitializerToDecl(ThisDecl, LParenLoc,
+ move_arg(Exprs),
+ CommaLocs.data(), RParenLoc);
+ }
+ } else {
+ Actions.ActOnUninitializedDecl(ThisDecl);
+ }
+
+ return ThisDecl;
+}
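+
+// The initializer forms handled above, by example ('T' is a hypothetical
+// class type; the declarations are illustrative only):
+//
+//   int x __asm__("foo") __attribute__((used)) = 1;  // asm label, attrs, '=' init
+//   T t(1, 2);                                       // C++ '(' expression-list ')'
+//   void f(int) = delete;                            // C++0x deleted definition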
+
+/// ParseInitDeclaratorListAfterFirstDeclarator - Parse 'declaration' after
+/// parsing 'declaration-specifiers declarator'. This method is split out this
+/// way to handle the ambiguity between top-level function-definitions and
+/// declarations.
+///
+/// init-declarator-list: [C99 6.7]
+/// init-declarator
+/// init-declarator-list ',' init-declarator
+///
+/// According to the standard grammar, =default and =delete are function
+/// definitions, but that definitely doesn't fit with the parser here.
+///
+Parser::DeclGroupPtrTy Parser::
+ParseInitDeclaratorListAfterFirstDeclarator(Declarator &D) {
+ // Declarators may be grouped together ("int X, *Y, Z();"). Remember the decls
+ // that we parse together here.
+ llvm::SmallVector<DeclPtrTy, 8> DeclsInGroup;
+
+ // At this point, we know that it is not a function definition. Parse the
+ // rest of the init-declarator-list.
+ while (1) {
+ DeclPtrTy ThisDecl = ParseDeclarationAfterDeclarator(D);
+ if (ThisDecl.get())
+ DeclsInGroup.push_back(ThisDecl);
+
+ // If we don't have a comma, it is either the end of the list (a ';') or an
+ // error; bail out.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ // Consume the comma.
+ ConsumeToken();
+
+ // Parse the next declarator.
+ D.clear();
+
+ // Accept attributes in an init-declarator. In the first declarator in a
+ // declaration, these would be part of the declspec. In subsequent
+ // declarators, they become part of the declarator itself, so that they
+ // don't apply to declarators after *this* one. Examples:
+ // short __attribute__((common)) var; -> declspec
+ // short var __attribute__((common)); -> declarator
+ // short x, __attribute__((common)) var; -> declarator
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ D.AddAttributes(AttrList, Loc);
+ }
+
+ ParseDeclarator(D);
+ }
+
+ return Actions.FinalizeDeclaratorGroup(CurScope, D.getDeclSpec(),
+ DeclsInGroup.data(),
+ DeclsInGroup.size());
+}
+
+/// ParseSpecifierQualifierList
+/// specifier-qualifier-list:
+/// type-specifier specifier-qualifier-list[opt]
+/// type-qualifier specifier-qualifier-list[opt]
+/// [GNU] attributes specifier-qualifier-list[opt]
+///
+void Parser::ParseSpecifierQualifierList(DeclSpec &DS) {
+ /// specifier-qualifier-list is a subset of declaration-specifiers. Just
+ /// parse declaration-specifiers and complain about extra stuff.
+ ParseDeclarationSpecifiers(DS);
+
+ // Validate declspec for type-name.
+ unsigned Specs = DS.getParsedSpecifiers();
+ if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
+ !DS.getAttributes())
+ Diag(Tok, diag::err_typename_requires_specqual);
+
+ // Issue diagnostic and remove storage class if present.
+ if (Specs & DeclSpec::PQ_StorageClassSpecifier) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(),diag::err_typename_invalid_storageclass);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_typename_invalid_storageclass);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Issue diagnostic and remove function specifier if present.
+ if (Specs & DeclSpec::PQ_FunctionSpecifier) {
+ if (DS.isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isVirtualSpecified())
+ Diag(DS.getVirtualSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
+ DS.ClearFunctionSpecs();
+ }
+}
+
+/// isValidAfterIdentifierInDeclarator - Return true if the specified token is
+/// valid after the identifier in a declarator which immediately follows the
+/// declspec. For example, these things are valid:
+///
+/// int x [ 4]; // direct-declarator
+/// int x ( int y); // direct-declarator
+/// int(int x ) // direct-declarator
+/// int x ; // simple-declaration
+/// int x = 17; // init-declarator-list
+/// int x , y; // init-declarator-list
+/// int x __asm__ ("foo"); // init-declarator-list
+/// int x : 4; // struct-declarator
+/// int x { 5}; // C++0x unified initializers
+///
+/// This is not, because 'x' does not immediately follow the declspec (though
+/// ')' happens to be valid anyway).
+/// int (x)
+///
+static bool isValidAfterIdentifierInDeclarator(const Token &T) {
+ return T.is(tok::l_square) || T.is(tok::l_paren) || T.is(tok::r_paren) ||
+ T.is(tok::semi) || T.is(tok::comma) || T.is(tok::equal) ||
+ T.is(tok::kw_asm) || T.is(tok::l_brace) || T.is(tok::colon);
+}
+
+
+/// ParseImplicitInt - This method is called when we have a non-typename
+/// identifier in a declspec (which normally terminates the decl spec) when
+/// the declspec has no type specifier. In this case, the declspec is either
+/// malformed or is "implicit int" (in K&R and C89).
+///
+/// This method handles diagnosing this prettily and returns false if the
+/// declspec is done being processed. If it recovers and thinks there may be
+/// other pieces of declspec after it, it returns true.
+///
+bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS) {
+ assert(Tok.is(tok::identifier) && "should have identifier");
+
+ SourceLocation Loc = Tok.getLocation();
+ // If we see an identifier that is not a type name, we normally would
+ // parse it as the identifier being declared. However, when a typename
+ // is typo'd or the definition is not included, this will incorrectly
+ // parse the typename as the identifier name and then misparse later
+ // parts of the declaration.
+ //
+ // As such, we try to do some look-ahead in cases where this would
+ // otherwise be an "implicit-int" case to see if this is invalid. For
+ // example: "static foo_t x = 4;" In this case, if we parsed foo_t as
+ // an identifier with implicit int, we'd get a parse error because the
+ // next token is obviously invalid for a type. Parse these as a case
+ // with an invalid type specifier.
+ assert(!DS.hasTypeSpecifier() && "Type specifier checked above");
+
+ // Since we know that this is either implicit int (which is rare) or an
+ // error, we do lookahead to try to do better recovery.
+ if (isValidAfterIdentifierInDeclarator(NextToken())) {
+ // If this token is valid for implicit int, e.g. "static x = 4", then
+ // we just avoid eating the identifier, so it will be parsed as the
+ // identifier in the declarator.
+ return false;
+ }
+
+ // Otherwise, if we don't consume this token, we are going to emit an
+ // error anyway. Try to recover from various common problems. Check
+ // to see if this was a reference to a tag name without a tag specified.
+ // This is a common problem in C (saying 'foo' instead of 'struct foo').
+ //
+ // C++ doesn't need this, and isTagName doesn't take SS.
+ if (SS == 0) {
+ const char *TagName = 0;
+ tok::TokenKind TagKind = tok::unknown;
+
+ switch (Actions.isTagName(*Tok.getIdentifierInfo(), CurScope)) {
+ default: break;
+ case DeclSpec::TST_enum: TagName="enum" ;TagKind=tok::kw_enum ;break;
+ case DeclSpec::TST_union: TagName="union" ;TagKind=tok::kw_union ;break;
+ case DeclSpec::TST_struct:TagName="struct";TagKind=tok::kw_struct;break;
+ case DeclSpec::TST_class: TagName="class" ;TagKind=tok::kw_class ;break;
+ }
+
+ if (TagName) {
+ Diag(Loc, diag::err_use_of_tag_name_without_tag)
+ << Tok.getIdentifierInfo() << TagName
+ << CodeModificationHint::CreateInsertion(Tok.getLocation(),TagName);
+
+ // Parse this as a tag as if the missing tag were present.
+ if (TagKind == tok::kw_enum)
+ ParseEnumSpecifier(Loc, DS, AS);
+ else
+ ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS);
+ return true;
+ }
+ }
+
+ // Since this is almost certainly an invalid type name, emit a
+ // diagnostic that says it, eat the token, and mark the declspec as
+ // invalid.
+ SourceRange R;
+ if (SS) R = SS->getRange();
+
+ Diag(Loc, diag::err_unknown_typename) << Tok.getIdentifierInfo() << R;
+ const char *PrevSpec;
+ DS.SetTypeSpecType(DeclSpec::TST_error, Loc, PrevSpec);
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+
+ // TODO: Could inject an invalid typedef decl in an enclosing scope to
+ // avoid rippling error messages on subsequent uses of the same type;
+ // this could be useful if a #include was forgotten.
+ return false;
+}
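+
+// The two recovery paths above, by example (assuming C89/K&R code where
+// 'struct foo' has been declared earlier):
+//
+//   static x = 4;   // next token '=' is declarator-like: keep the identifier
+//                   //   as the declarator and accept implicit int
+//   foo *p;         // tag used without 'struct': diagnosed with a fix-it and
+//                   //   reparsed as if 'struct foo *p;' had been written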
+
+/// ParseDeclarationSpecifiers
+/// declaration-specifiers: [C99 6.7]
+/// storage-class-specifier declaration-specifiers[opt]
+/// type-specifier declaration-specifiers[opt]
+/// [C99] function-specifier declaration-specifiers[opt]
+/// [GNU] attributes declaration-specifiers[opt]
+///
+/// storage-class-specifier: [C99 6.7.1]
+/// 'typedef'
+/// 'extern'
+/// 'static'
+/// 'auto'
+/// 'register'
+/// [C++] 'mutable'
+/// [GNU] '__thread'
+/// function-specifier: [C99 6.7.4]
+/// [C99] 'inline'
+/// [C++] 'virtual'
+/// [C++] 'explicit'
+/// 'friend': [C++ dcl.friend]
+///
+void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS) {
+ DS.SetRangeStart(Tok.getLocation());
+ while (1) {
+ int isInvalid = false;
+ const char *PrevSpec = 0;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ default:
+ DoneWithDeclSpec:
+ // If this is not a declaration specifier token, we're done reading decl
+ // specifiers. First verify that the DeclSpec is consistent.
+ DS.Finish(Diags, PP);
+ return;
+
+ case tok::coloncolon: // ::foo::bar
+ // Annotate C++ scope specifiers. If we get one, loop.
+ if (TryAnnotateCXXScopeToken())
+ continue;
+ goto DoneWithDeclSpec;
+
+ case tok::annot_cxxscope: {
+ if (DS.hasTypeSpecifier())
+ goto DoneWithDeclSpec;
+
+ // We are looking for a qualified typename.
+ Token Next = NextToken();
+ if (Next.is(tok::annot_template_id) &&
+ static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
+ ->Kind == TNK_Type_template) {
+ // We have a qualified template-id, e.g., N::A<int>
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS);
+ assert(Tok.is(tok::annot_template_id) &&
+ "ParseOptionalCXXScopeSpecifier not working");
+ AnnotateTemplateIdTokenAsType(&SS);
+ continue;
+ }
+
+ if (Next.isNot(tok::identifier))
+ goto DoneWithDeclSpec;
+
+ CXXScopeSpec SS;
+ SS.setScopeRep(Tok.getAnnotationValue());
+ SS.setRange(Tok.getAnnotationRange());
+
+ // If the next token is the name of the class type that the C++ scope
+ // denotes, followed by a '(', then this is a constructor declaration.
+ // We're done with the decl-specifiers.
+ if (Actions.isCurrentClassName(*Next.getIdentifierInfo(),
+ CurScope, &SS) &&
+ GetLookAheadToken(2).is(tok::l_paren))
+ goto DoneWithDeclSpec;
+
+ TypeTy *TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(),
+ Next.getLocation(), CurScope, &SS);
+
+ // If the referenced identifier is not a type, then this declspec is
+ // erroneous: We already checked that it has no type specifier, and
+ // C++ doesn't have implicit int. Diagnose it as a typo w.r.t. the
+ // typename.
+ if (TypeRep == 0) {
+ ConsumeToken(); // Eat the scope spec so the identifier is current.
+ if (ParseImplicitInt(DS, &SS, TemplateInfo, AS)) continue;
+ goto DoneWithDeclSpec;
+ }
+
+ ConsumeToken(); // The C++ scope.
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ TypeRep);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The typename.
+
+ continue;
+ }
+
+ case tok::annot_typename: {
+ if (Tok.getAnnotationValue())
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ Tok.getAnnotationValue());
+ else
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface. If we don't have Objective-C or a '<', this is
+ // just a normal reference to a typedef name.
+ if (!Tok.is(tok::less) || !getLang().ObjC1)
+ continue;
+
+ SourceLocation EndProtoLoc;
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolDecl;
+ ParseObjCProtocolReferences(ProtocolDecl, false, EndProtoLoc);
+ DS.setProtocolQualifiers(&ProtocolDecl[0], ProtocolDecl.size());
+
+ DS.SetRangeEnd(EndProtoLoc);
+ continue;
+ }
+
+ // typedef-name
+ case tok::identifier: {
+ // In C++, check to see if this is a scope specifier like foo::bar::; if
+ // so, handle it as such. This is important for ctor parsing.
+ if (getLang().CPlusPlus && TryAnnotateCXXScopeToken())
+ continue;
+
+ // This identifier can only be a typedef name if we haven't already seen
+ // a type-specifier. Without this check we misparse:
+ // typedef int X; struct Y { short X; }; as 'short int'.
+ if (DS.hasTypeSpecifier())
+ goto DoneWithDeclSpec;
+
+ // It has to be available as a typedef too!
+ TypeTy *TypeRep = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), CurScope);
+
+ // If this is not a typedef name, don't parse it as part of the declspec,
+ // it must be an implicit int or an error.
+ if (TypeRep == 0) {
+ if (ParseImplicitInt(DS, 0, TemplateInfo, AS)) continue;
+ goto DoneWithDeclSpec;
+ }
+
+ // C++: If the identifier is actually the name of the class type
+ // being defined and the next token is a '(', then this is a
+ // constructor declaration. We're done with the decl-specifiers
+ // and will treat this token as an identifier.
+ if (getLang().CPlusPlus && CurScope->isClassScope() &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(), CurScope) &&
+ NextToken().getKind() == tok::l_paren)
+ goto DoneWithDeclSpec;
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ TypeRep);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The identifier
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface. If we don't have Objective-C or a '<', this is
+ // just a normal reference to a typedef name.
+ if (!Tok.is(tok::less) || !getLang().ObjC1)
+ continue;
+
+ SourceLocation EndProtoLoc;
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolDecl;
+ ParseObjCProtocolReferences(ProtocolDecl, false, EndProtoLoc);
+ DS.setProtocolQualifiers(&ProtocolDecl[0], ProtocolDecl.size());
+
+ DS.SetRangeEnd(EndProtoLoc);
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+
+ // type-name
+ case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ if (TemplateId->Kind != TNK_Type_template) {
+ // This template-id does not refer to a type name, so we're
+ // done with the type-specifiers.
+ goto DoneWithDeclSpec;
+ }
+
+ // Turn the template-id annotation token into a type annotation
+ // token, then try again to parse it as a type-specifier.
+ AnnotateTemplateIdTokenAsType();
+ continue;
+ }
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ DS.AddAttributes(ParseAttributes());
+ continue;
+
+ // Microsoft declspec support.
+ case tok::kw___declspec:
+ if (!PP.getLangOptions().Microsoft)
+ goto DoneWithDeclSpec;
+ FuzzyParseMicrosoftDeclSpec();
+ continue;
+
+ // Microsoft single token adornments.
+ case tok::kw___forceinline:
+ case tok::kw___w64:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ if (!PP.getLangOptions().Microsoft)
+ goto DoneWithDeclSpec;
+ // Just ignore it.
+ break;
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_typedef, Loc, PrevSpec);
+ break;
+ case tok::kw_extern:
+ if (DS.isThreadSpecified())
+ Diag(Tok, diag::ext_thread_before) << "extern";
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_extern, Loc, PrevSpec);
+ break;
+ case tok::kw___private_extern__:
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_private_extern, Loc,
+ PrevSpec);
+ break;
+ case tok::kw_static:
+ if (DS.isThreadSpecified())
+ Diag(Tok, diag::ext_thread_before) << "static";
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_static, Loc, PrevSpec);
+ break;
+ case tok::kw_auto:
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_auto, Loc, PrevSpec);
+ break;
+ case tok::kw_register:
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_register, Loc, PrevSpec);
+ break;
+ case tok::kw_mutable:
+ isInvalid = DS.SetStorageClassSpec(DeclSpec::SCS_mutable, Loc, PrevSpec);
+ break;
+ case tok::kw___thread:
+ isInvalid = DS.SetStorageClassSpecThread(Loc, PrevSpec)*2;
+ break;
+
+ // function-specifier
+ case tok::kw_inline:
+ isInvalid = DS.SetFunctionSpecInline(Loc, PrevSpec);
+ break;
+ case tok::kw_virtual:
+ isInvalid = DS.SetFunctionSpecVirtual(Loc, PrevSpec);
+ break;
+ case tok::kw_explicit:
+ isInvalid = DS.SetFunctionSpecExplicit(Loc, PrevSpec);
+ break;
+
+ // friend
+ case tok::kw_friend:
+ isInvalid = DS.SetFriendSpec(Loc, PrevSpec);
+ break;
+
+ // type-specifier
+ case tok::kw_short:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec);
+ break;
+ case tok::kw_long:
+ if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec);
+ else
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec);
+ break;
+ case tok::kw_signed:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec);
+ break;
+ case tok::kw_unsigned:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec);
+ break;
+ case tok::kw__Complex:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_complex, Loc, PrevSpec);
+ break;
+ case tok::kw__Imaginary:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_imaginary, Loc, PrevSpec);
+ break;
+ case tok::kw_void:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec);
+ break;
+ case tok::kw_char:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec);
+ break;
+ case tok::kw_int:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec);
+ break;
+ case tok::kw_float:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec);
+ break;
+ case tok::kw_double:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec);
+ break;
+ case tok::kw_wchar_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec);
+ break;
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal32:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal64:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec);
+ break;
+
+ // class-specifier:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union: {
+ tok::TokenKind Kind = Tok.getKind();
+ ConsumeToken();
+ ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS);
+ continue;
+ }
+
+ // enum-specifier:
+ case tok::kw_enum:
+ ConsumeToken();
+ ParseEnumSpecifier(Loc, DS, AS);
+ continue;
+
+ // cv-qualifier:
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec,getLang())*2;
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec,
+ getLang())*2;
+ break;
+
+ // C++ typename-specifier:
+ case tok::kw_typename:
+ if (TryAnnotateTypeOrScopeToken())
+ continue;
+ break;
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ continue;
+
+ case tok::less:
+ // GCC ObjC supports types like "<SomeProtocol>" as a synonym for
+ // "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
+ // but we support it.
+ if (DS.hasTypeSpecifier() || !getLang().ObjC1)
+ goto DoneWithDeclSpec;
+
+ {
+ SourceLocation EndProtoLoc;
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolDecl;
+ ParseObjCProtocolReferences(ProtocolDecl, false, EndProtoLoc);
+ DS.setProtocolQualifiers(&ProtocolDecl[0], ProtocolDecl.size());
+ DS.SetRangeEnd(EndProtoLoc);
+
+ Diag(Loc, diag::warn_objc_protocol_qualifier_missing_id)
+ << CodeModificationHint::CreateInsertion(Loc, "id")
+ << SourceRange(Loc, EndProtoLoc);
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+ }
+ // If the specifier combination wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ // Pick between error or extwarn.
+ unsigned DiagID = isInvalid == 1 ? diag::err_invalid_decl_spec_combination
+ : diag::ext_duplicate_declspec;
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ }
+}
+
+/// ParseOptionalTypeSpecifier - Try to parse a single type-specifier. We
+/// primarily follow the C++ grammar with additions for C99 and GNU,
+/// which together subsume the C grammar. Note that the C++
+/// type-specifier also includes the C type-qualifier (for const,
+/// volatile, and C99 restrict). Returns true if a type-specifier was
+/// found (and parsed), false otherwise.
+///
+/// type-specifier: [C++ 7.1.5]
+/// simple-type-specifier
+/// class-specifier
+/// enum-specifier
+/// elaborated-type-specifier [TODO]
+/// cv-qualifier
+///
+/// cv-qualifier: [C++ 7.1.5.1]
+/// 'const'
+/// 'volatile'
+/// [C99] 'restrict'
+///
+/// simple-type-specifier: [C++ 7.1.5.2]
+/// '::'[opt] nested-name-specifier[opt] type-name [TODO]
+/// '::'[opt] nested-name-specifier 'template' template-id [TODO]
+/// 'char'
+/// 'wchar_t'
+/// 'bool'
+/// 'short'
+/// 'int'
+/// 'long'
+/// 'signed'
+/// 'unsigned'
+/// 'float'
+/// 'double'
+/// 'void'
+/// [C99] '_Bool'
+/// [C99] '_Complex'
+/// [C99] '_Imaginary' // Removed in TC2?
+/// [GNU] '_Decimal32'
+/// [GNU] '_Decimal64'
+/// [GNU] '_Decimal128'
+/// [GNU] typeof-specifier
+/// [OBJC] class-name objc-protocol-refs[opt] [TODO]
+/// [OBJC] typedef-name objc-protocol-refs[opt] [TODO]
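+///
+/// For example, while parsing 'unsigned long long', 'unsigned' sets the type
+/// sign, the first 'long' sets TSW_long, and a second 'long' widens it to
+/// TSW_longlong (see the kw_long case below).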
+bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, int& isInvalid,
+ const char *&PrevSpec,
+ const ParsedTemplateInfo &TemplateInfo) {
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+ case tok::kw_typename: // typename foo::bar
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, TemplateInfo);
+ // Otherwise, not a type specifier.
+ return false;
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, TemplateInfo);
+ // Otherwise, not a type specifier.
+ return false;
+
+ // simple-type-specifier:
+ case tok::annot_typename: {
+ if (Tok.getAnnotationValue())
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ Tok.getAnnotationValue());
+ else
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface. If we don't have Objective-C or a '<', this is
+ // just a normal reference to a typedef name.
+ if (!Tok.is(tok::less) || !getLang().ObjC1)
+ return true;
+
+ SourceLocation EndProtoLoc;
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolDecl;
+ ParseObjCProtocolReferences(ProtocolDecl, false, EndProtoLoc);
+ DS.setProtocolQualifiers(&ProtocolDecl[0], ProtocolDecl.size());
+
+ DS.SetRangeEnd(EndProtoLoc);
+ return true;
+ }
+
+ case tok::kw_short:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec);
+ break;
+ case tok::kw_long:
+ if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec);
+ else
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec);
+ break;
+ case tok::kw_signed:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec);
+ break;
+ case tok::kw_unsigned:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec);
+ break;
+ case tok::kw__Complex:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_complex, Loc, PrevSpec);
+ break;
+ case tok::kw__Imaginary:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_imaginary, Loc, PrevSpec);
+ break;
+ case tok::kw_void:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec);
+ break;
+ case tok::kw_char:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec);
+ break;
+ case tok::kw_int:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec);
+ break;
+ case tok::kw_float:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec);
+ break;
+ case tok::kw_double:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec);
+ break;
+ case tok::kw_wchar_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec);
+ break;
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal32:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal64:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec);
+ break;
+ case tok::kw__Decimal128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec);
+ break;
+
+ // class-specifier:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union: {
+ tok::TokenKind Kind = Tok.getKind();
+ ConsumeToken();
+ ParseClassSpecifier(Kind, Loc, DS, TemplateInfo);
+ return true;
+ }
+
+ // enum-specifier:
+ case tok::kw_enum:
+ ConsumeToken();
+ ParseEnumSpecifier(Loc, DS);
+ return true;
+
+ // cv-qualifier:
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec,
+ getLang())*2;
+ break;
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ return true;
+
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ if (!PP.getLangOptions().Microsoft) return false;
+ ConsumeToken();
+ return true;
+
+ default:
+ // Not a type-specifier; do nothing.
+ return false;
+ }
+
+ // If the specifier combination wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ // Pick between error or extwarn.
+ unsigned DiagID = isInvalid == 1 ? diag::err_invalid_decl_spec_combination
+ : diag::ext_duplicate_declspec;
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // whatever we parsed above.
+ return true;
+}
+
+/// ParseStructDeclaration - Parse a struct declaration without the terminating
+/// semicolon.
+///
+/// struct-declaration:
+/// specifier-qualifier-list struct-declarator-list
+/// [GNU] __extension__ struct-declaration
+/// [GNU] specifier-qualifier-list
+/// struct-declarator-list:
+/// struct-declarator
+/// struct-declarator-list ',' struct-declarator
+/// [GNU] struct-declarator-list ',' attributes[opt] struct-declarator
+/// struct-declarator:
+/// declarator
+/// [GNU] declarator attributes[opt]
+/// declarator[opt] ':' constant-expression
+/// [GNU] declarator[opt] ':' constant-expression attributes[opt]
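+///
+/// For example, the struct-declaration
+///   unsigned flags : 4, *next;
+/// is parsed as one specifier-qualifier-list ('unsigned') shared by two
+/// struct-declarators: a bit-field and a pointer declarator.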
+///
+void Parser::
+ParseStructDeclaration(DeclSpec &DS,
+ llvm::SmallVectorImpl<FieldDeclarator> &Fields) {
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseStructDeclaration(DS, Fields);
+ }
+
+ // Parse the common specifier-qualifiers-list piece.
+ SourceLocation DSStart = Tok.getLocation();
+ ParseSpecifierQualifierList(DS);
+
+ // If there are no declarators, this is a free-standing declaration
+ // specifier. Let the actions module cope with it.
+ if (Tok.is(tok::semi)) {
+ Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
+ return;
+ }
+
+ // Read struct-declarators until we find the semicolon.
+ Fields.push_back(FieldDeclarator(DS));
+ while (1) {
+ FieldDeclarator &DeclaratorInfo = Fields.back();
+
+ /// struct-declarator: declarator
+ /// struct-declarator: declarator[opt] ':' constant-expression
+ if (Tok.isNot(tok::colon))
+ ParseDeclarator(DeclaratorInfo.D);
+
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ OwningExprResult Res(ParseConstantExpression());
+ if (Res.isInvalid())
+ SkipUntil(tok::semi, true, true);
+ else
+ DeclaratorInfo.BitfieldSize = Res.release();
+ }
+
+ // If attributes exist after the declarator, parse them.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ DeclaratorInfo.D.AddAttributes(AttrList, Loc);
+ }
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ if (Tok.isNot(tok::comma))
+ return;
+
+ // Consume the comma.
+ ConsumeToken();
+
+ // Parse the next declarator.
+ Fields.push_back(FieldDeclarator(DS));
+
+    // Attributes are only allowed here on successive declarators.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ Fields.back().D.AddAttributes(AttrList, Loc);
+ }
+ }
+}
+
+/// ParseStructUnionBody
+/// struct-contents:
+/// struct-declaration-list
+/// [EXT] empty
+/// [GNU] "struct-declaration-list" without terminating ';'
+/// struct-declaration-list:
+/// struct-declaration
+/// struct-declaration-list struct-declaration
+/// [OBJC] '@' 'defs' '(' class-name ')'
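+///
+/// For example, this parses everything between the braces of
+///   struct S { int x; unsigned y : 8; };
+/// and, in Objective-C, bodies that use the '@defs(ClassName)' extension.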
+///
+void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
+ unsigned TagType, DeclPtrTy TagDecl) {
+ PrettyStackTraceActionsDecl CrashInfo(TagDecl, RecordLoc, Actions,
+ PP.getSourceManager(),
+ "parsing struct/union body");
+
+ SourceLocation LBraceLoc = ConsumeBrace();
+
+ ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
+ Actions.ActOnTagStartDefinition(CurScope, TagDecl);
+
+ // Empty structs are an extension in C (C99 6.7.2.1p7), but are allowed in
+ // C++.
+ if (Tok.is(tok::r_brace) && !getLang().CPlusPlus)
+ Diag(Tok, diag::ext_empty_struct_union_enum)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType);
+
+ llvm::SmallVector<DeclPtrTy, 32> FieldDecls;
+ llvm::SmallVector<FieldDeclarator, 8> FieldDeclarators;
+
+ // While we still have something to read, read the declarations in the struct.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one struct-declaration.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi)
+ << CodeModificationHint::CreateRemoval(SourceRange(Tok.getLocation()));
+ ConsumeToken();
+ continue;
+ }
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS;
+ FieldDeclarators.clear();
+ if (!Tok.is(tok::at)) {
+ ParseStructDeclaration(DS, FieldDeclarators);
+
+ // Convert them all to fields.
+ for (unsigned i = 0, e = FieldDeclarators.size(); i != e; ++i) {
+ FieldDeclarator &FD = FieldDeclarators[i];
+ // Install the declarator into the current TagDecl.
+ DeclPtrTy Field = Actions.ActOnField(CurScope, TagDecl,
+ DS.getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize);
+ FieldDecls.push_back(Field);
+ }
+ } else { // Handle @defs
+ ConsumeToken();
+ if (!Tok.isObjCAtKeyword(tok::objc_defs)) {
+ Diag(Tok, diag::err_unexpected_at);
+ SkipUntil(tok::semi, true, true);
+ continue;
+ }
+ ConsumeToken();
+ ExpectAndConsume(tok::l_paren, diag::err_expected_lparen);
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi, true, true);
+ continue;
+ }
+ llvm::SmallVector<DeclPtrTy, 16> Fields;
+ Actions.ActOnDefs(CurScope, TagDecl, Tok.getLocation(),
+ Tok.getIdentifierInfo(), Fields);
+ FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
+ ConsumeToken();
+ ExpectAndConsume(tok::r_paren, diag::err_expected_rparen);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (Tok.is(tok::r_brace)) {
+ Diag(Tok, diag::ext_expected_semi_decl_list);
+ break;
+ } else {
+ Diag(Tok, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, true, true);
+ }
+ }
+
+ SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+
+ AttributeList *AttrList = 0;
+ // If attributes exist after struct contents, parse them.
+ if (Tok.is(tok::kw___attribute))
+ AttrList = ParseAttributes();
+
+ Actions.ActOnFields(CurScope,
+ RecordLoc, TagDecl, FieldDecls.data(), FieldDecls.size(),
+ LBraceLoc, RBraceLoc,
+ AttrList);
+ StructScope.Exit();
+ Actions.ActOnTagFinishDefinition(CurScope, TagDecl);
+}
+
+
+/// ParseEnumSpecifier
+/// enum-specifier: [C99 6.7.2.2]
+/// 'enum' identifier[opt] '{' enumerator-list '}'
+/// [C99/C++] 'enum' identifier[opt] '{' enumerator-list ',' '}'
+/// [GNU] 'enum' attributes[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}' attributes[opt]
+/// 'enum' identifier
+/// [GNU] 'enum' attributes[opt] identifier
+///
+/// [C++] elaborated-type-specifier:
+/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
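+///
+/// For example, 'enum Color { Red, Green }' is parsed as a definition,
+/// 'enum Color;' as a declaration, and 'enum Color c;' as a reference to a
+/// previously-declared enum (see the TagKind selection below).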
+///
+void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
+ AccessSpecifier AS) {
+ // Parse the tag portion of this.
+
+ AttributeList *Attr = 0;
+ // If attributes exist after tag, parse them.
+ if (Tok.is(tok::kw___attribute))
+ Attr = ParseAttributes();
+
+ CXXScopeSpec SS;
+ if (getLang().CPlusPlus && ParseOptionalCXXScopeSpecifier(SS)) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ if (Tok.isNot(tok::l_brace)) {
+ // Has no name and is not a definition.
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+ }
+ }
+
+ // Must have either 'enum name' or 'enum {...}'.
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_ident_lbrace);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ // If an identifier is present, consume and remember it.
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ }
+
+ // There are three options here. If we have 'enum foo;', then this is a
+ // forward declaration. If we have 'enum foo {...' then this is a
+ // definition. Otherwise we have something like 'enum foo xyz', a reference.
+ //
+ // This is needed to handle stuff like this right (C99 6.7.2.3p11):
+ // enum foo {..}; void bar() { enum foo; } <- new foo in bar.
+ // enum foo {..}; void bar() { enum foo x; } <- use of old foo.
+ //
+ Action::TagKind TK;
+ if (Tok.is(tok::l_brace))
+ TK = Action::TK_Definition;
+ else if (Tok.is(tok::semi))
+ TK = Action::TK_Declaration;
+ else
+ TK = Action::TK_Reference;
+ bool Owned = false;
+ DeclPtrTy TagDecl = Actions.ActOnTag(CurScope, DeclSpec::TST_enum, TK,
+ StartLoc, SS, Name, NameLoc, Attr, AS,
+ Owned);
+
+ if (Tok.is(tok::l_brace))
+ ParseEnumBody(StartLoc, TagDecl);
+
+ // TODO: semantic analysis on the declspec for enums.
+ const char *PrevSpec = 0;
+ if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc, PrevSpec,
+ TagDecl.getAs<void>(), Owned))
+ Diag(StartLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
+}
+
+/// ParseEnumBody - Parse a {} enclosed enumerator-list.
+/// enumerator-list:
+/// enumerator
+/// enumerator-list ',' enumerator
+/// enumerator:
+/// enumeration-constant
+/// enumeration-constant '=' constant-expression
+/// enumeration-constant:
+/// identifier
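+///
+/// For example:
+///   enum { Red, Green = 1 << 4, Blue, };
+/// The trailing comma is accepted, but outside C99 and C++0x it is
+/// diagnosed as an extension (see ext_enumerator_list_comma below).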
+///
+void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) {
+ // Enter the scope of the enum body and start the definition.
+ ParseScope EnumScope(this, Scope::DeclScope);
+ Actions.ActOnTagStartDefinition(CurScope, EnumDecl);
+
+ SourceLocation LBraceLoc = ConsumeBrace();
+
+ // C does not allow an empty enumerator-list, C++ does [dcl.enum].
+ if (Tok.is(tok::r_brace) && !getLang().CPlusPlus)
+ Diag(Tok, diag::ext_empty_struct_union_enum) << "enum";
+
+ llvm::SmallVector<DeclPtrTy, 32> EnumConstantDecls;
+
+ DeclPtrTy LastEnumConstDecl;
+
+ // Parse the enumerator-list.
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ SourceLocation EqualLoc;
+ OwningExprResult AssignedVal(Actions);
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+ AssignedVal = ParseConstantExpression();
+ if (AssignedVal.isInvalid())
+ SkipUntil(tok::comma, tok::r_brace, true, true);
+ }
+
+ // Install the enumerator constant into EnumDecl.
+ DeclPtrTy EnumConstDecl = Actions.ActOnEnumConstant(CurScope, EnumDecl,
+ LastEnumConstDecl,
+ IdentLoc, Ident,
+ EqualLoc,
+ AssignedVal.release());
+ EnumConstantDecls.push_back(EnumConstDecl);
+ LastEnumConstDecl = EnumConstDecl;
+
+ if (Tok.isNot(tok::comma))
+ break;
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier) &&
+ !(getLang().C99 || getLang().CPlusPlus0x))
+ Diag(CommaLoc, diag::ext_enumerator_list_comma)
+ << getLang().CPlusPlus
+ << CodeModificationHint::CreateRemoval((SourceRange(CommaLoc)));
+ }
+
+ // Eat the }.
+ SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+
+ Actions.ActOnEnumBody(StartLoc, LBraceLoc, RBraceLoc, EnumDecl,
+ EnumConstantDecls.data(), EnumConstantDecls.size());
+
+ Action::AttrTy *AttrList = 0;
+ // If attributes exist after the identifier list, parse them.
+ if (Tok.is(tok::kw___attribute))
+    AttrList = ParseAttributes(); // FIXME: where do these attributes go?
+
+ EnumScope.Exit();
+ Actions.ActOnTagFinishDefinition(CurScope, EnumDecl);
+}
+
+/// isTypeQualifier - Return true if the current token could be the start of
+/// a type-qualifier-list.
+bool Parser::isTypeQualifier() const {
+ switch (Tok.getKind()) {
+ default: return false;
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+ return true;
+ }
+}
+
+/// isTypeSpecifierQualifier - Return true if the current token could be the
+/// start of a specifier-qualifier-list.
+bool Parser::isTypeSpecifierQualifier() {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::identifier: // foo::bar
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isTypeSpecifierQualifier();
+ // Otherwise, not a type specifier.
+ return false;
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isTypeSpecifierQualifier();
+ // Otherwise, not a type specifier.
+ return false;
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLang().ObjC1;
+
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ return PP.getLangOptions().Microsoft;
+ }
+}
+
+/// isDeclarationSpecifier() - Return true if the current token is part of a
+/// declaration specifier.
+bool Parser::isDeclarationSpecifier() {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::identifier: // foo::bar
+ // Unfortunate hack to support "Class.factoryMethod" notation.
+ if (getLang().ObjC1 && NextToken().is(tok::period))
+ return false;
+ // Fall through
+
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isDeclarationSpecifier();
+ // Otherwise, not a declaration specifier.
+ return false;
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isDeclarationSpecifier();
+ // Otherwise, not a declaration specifier.
+ return false;
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ case tok::kw_extern:
+ case tok::kw___private_extern__:
+ case tok::kw_static:
+ case tok::kw_auto:
+ case tok::kw_register:
+ case tok::kw___thread:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+
+ // typedef-name
+ case tok::annot_typename:
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // GNU attributes.
+ case tok::kw___attribute:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLang().ObjC1;
+
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ return PP.getLangOptions().Microsoft;
+ }
+}
+
+
+/// ParseTypeQualifierListOpt
+/// type-qualifier-list: [C99 6.7.5]
+/// type-qualifier
+/// [GNU] attributes [ only if AttributesAllowed=true ]
+/// type-qualifier-list type-qualifier
+/// [GNU] type-qualifier-list attributes [ only if AttributesAllowed=true ]
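+///
+/// For example, when parsing the pointer declarator in
+///   char * const volatile restrict p;
+/// the three qualifiers after the '*' are collected here.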
+///
+void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, bool AttributesAllowed) {
+ while (1) {
+ int isInvalid = false;
+ const char *PrevSpec = 0;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec,
+ getLang())*2;
+ break;
+ case tok::kw___ptr64:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ if (!PP.getLangOptions().Microsoft)
+ goto DoneWithTypeQuals;
+ // Just ignore it.
+ break;
+ case tok::kw___attribute:
+ if (AttributesAllowed) {
+ DS.AddAttributes(ParseAttributes());
+ continue; // do *not* consume the next token!
+ }
+ // otherwise, FALL THROUGH!
+ default:
+ DoneWithTypeQuals:
+ // If this is not a type-qualifier token, we're done reading type
+ // qualifiers. First verify that DeclSpec's are consistent.
+ DS.Finish(Diags, PP);
+ return;
+ }
+
+ // If the specifier combination wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ // Pick between error or extwarn.
+ unsigned DiagID = isInvalid == 1 ? diag::err_invalid_decl_spec_combination
+ : diag::ext_duplicate_declspec;
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+ ConsumeToken();
+ }
+}
+
+
+/// ParseDeclarator - Parse and verify a newly-initialized declarator.
+///
+void Parser::ParseDeclarator(Declarator &D) {
+ /// This implements the 'declarator' production in the C grammar, then checks
+ /// for well-formedness and issues diagnostics.
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+}
+
+/// ParseDeclaratorInternal - Parse a C or C++ declarator. The direct-declarator
+/// is parsed by the function passed to it. Pass null, and the direct-declarator
+/// isn't parsed at all, making this function effectively parse the C++
+/// ptr-operator production.
+///
+/// declarator: [C99 6.7.5] [C++ 8p4, dcl.decl]
+/// [C] pointer[opt] direct-declarator
+/// [C++] direct-declarator
+/// [C++] ptr-operator declarator
+///
+/// pointer: [C99 6.7.5]
+/// '*' type-qualifier-list[opt]
+/// '*' type-qualifier-list[opt] pointer
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&'
+/// [GNU] '&' restrict[opt] attributes[opt]
+/// [GNU?] '&&' restrict[opt] attributes[opt]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
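+///
+/// For example, 'int *const p', 'int &r', the C++0x rvalue reference
+/// 'int &&rr', and the member-pointer declarator in 'int S::*pm' all pass
+/// through here before the direct-declarator itself is parsed.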
+void Parser::ParseDeclaratorInternal(Declarator &D,
+ DirectDeclParseFunction DirectDeclParser) {
+
+ // C++ member pointers start with a '::' or a nested-name.
+ // Member pointers get special handling, since there's no place for the
+ // scope spec in the generic path below.
+ if (getLang().CPlusPlus &&
+ (Tok.is(tok::coloncolon) || Tok.is(tok::identifier) ||
+ Tok.is(tok::annot_cxxscope))) {
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS)) {
+      if (Tok.isNot(tok::star)) {
+ // The scope spec really belongs to the direct-declarator.
+ D.getCXXScopeSpec() = SS;
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ SourceLocation Loc = ConsumeToken();
+ D.SetRangeEnd(Loc);
+ DeclSpec DS;
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // Recurse to parse whatever is left.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ // Sema will have to catch (syntactically invalid) pointers into global
+ // scope. It has to catch pointers into namespace scope anyway.
+ D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
+ Loc, DS.TakeAttributes()),
+ /* Don't replace range end. */SourceLocation());
+ return;
+ }
+ }
+
+ tok::TokenKind Kind = Tok.getKind();
+ // Not a pointer, C++ reference, or block.
+ if (Kind != tok::star && Kind != tok::caret &&
+ (Kind != tok::amp || !getLang().CPlusPlus) &&
+ // We parse rvalue refs in C++03, because otherwise the errors are scary.
+ (Kind != tok::ampamp || !getLang().CPlusPlus)) {
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ // Otherwise, '*' -> pointer, '^' -> block, '&' -> lvalue reference,
+ // '&&' -> rvalue reference
+ SourceLocation Loc = ConsumeToken(); // Eat the *, ^, & or &&.
+ D.SetRangeEnd(Loc);
+
+ if (Kind == tok::star || Kind == tok::caret) {
+ // Is a pointer.
+ DeclSpec DS;
+
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+ if (Kind == tok::star)
+ // Remember that we parsed a pointer type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc,
+ DS.TakeAttributes()),
+ SourceLocation());
+ else
+ // Remember that we parsed a Block type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(),
+ Loc, DS.TakeAttributes()),
+ SourceLocation());
+ } else {
+ // Is a reference
+ DeclSpec DS;
+
+ // Complain about rvalue references in C++03, but then go on and build
+ // the declarator.
+ if (Kind == tok::ampamp && !getLang().CPlusPlus0x)
+ Diag(Loc, diag::err_rvalue_reference);
+
+ // C++ 8.3.2p1: cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef or of a
+ // template type argument, in which case the cv-qualifiers are ignored.
+ //
+    // [GNU] Restricted references are allowed.
+ // [GNU] Attributes on references are allowed.
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "volatile";
+ }
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ if (D.getNumTypeObjects() > 0) {
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ DeclaratorChunk& InnerChunk = D.getTypeObject(D.getNumTypeObjects() - 1);
+ if (InnerChunk.Kind == DeclaratorChunk::Reference) {
+ if (const IdentifierInfo *II = D.getIdentifier())
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << II;
+ else
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << "type name";
+
+ // Once we've complained about the reference-to-reference, we
+ // can go ahead and build the (technically ill-formed)
+ // declarator: reference collapsing will take care of it.
+ }
+ }
+
+ // Remember that we parsed a reference type. It doesn't have type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getReference(DS.getTypeQualifiers(), Loc,
+ DS.TakeAttributes(),
+ Kind == tok::amp),
+ SourceLocation());
+ }
+}
+
+/// ParseDirectDeclarator
+/// direct-declarator: [C99 6.7.5]
+/// [C99] identifier
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
+/// [C++] direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// [C++] declarator-id
+///
+/// declarator-id: [C++ 8]
+/// id-expression
+/// '::'[opt] nested-name-specifier[opt] type-name
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id [TODO]
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id [TODO]
+/// '~' class-name
+/// template-id
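+///
+/// For example, in 'int (*fp)(int)' the direct-declarator is the grouping
+/// paren '(*fp)' followed by a parameter list; in C++, declarator-ids such
+/// as 'operator+', '~T' and template-ids are also handled here.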
+///
+void Parser::ParseDirectDeclarator(Declarator &D) {
+ DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
+
+ if (getLang().CPlusPlus) {
+ if (D.mayHaveIdentifier()) {
+ // ParseDeclaratorInternal might already have parsed the scope.
+ bool afterCXXScope = D.getCXXScopeSpec().isSet() ||
+ ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec());
+ if (afterCXXScope) {
+ // Change the declaration context for name lookup, until this function
+ // is exited (and the declarator has been parsed).
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+
+ if (Tok.is(tok::identifier)) {
+ assert(Tok.getIdentifierInfo() && "Not an identifier?");
+
+ // If this identifier is the name of the current class, it's a
+ // constructor name.
+ if (!D.getDeclSpec().hasTypeSpecifier() &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(),CurScope)) {
+ D.setConstructor(Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), CurScope),
+ Tok.getLocation());
+ // This is a normal identifier.
+ } else
+ D.SetIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ goto PastIdentifier;
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+
+ // FIXME: Could this template-id name a constructor?
+
+ // FIXME: This is an egregious hack, where we silently ignore
+ // the specialization (which should be a function template
+ // specialization name) and use the name instead. This hack
+ // will go away when we have support for function
+ // specializations.
+ D.SetIdentifier(TemplateId->Name, Tok.getLocation());
+ TemplateId->Destroy();
+ ConsumeToken();
+ goto PastIdentifier;
+ } else if (Tok.is(tok::kw_operator)) {
+ SourceLocation OperatorLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ // First try the name of an overloaded operator
+ if (OverloadedOperatorKind Op = TryParseOperatorFunctionId(&EndLoc)) {
+ D.setOverloadedOperator(Op, OperatorLoc, EndLoc);
+ } else {
+ // This must be a conversion function (C++ [class.conv.fct]).
+ if (TypeTy *ConvType = ParseConversionFunctionId(&EndLoc))
+ D.setConversionFunction(ConvType, OperatorLoc, EndLoc);
+ else {
+ D.SetIdentifier(0, Tok.getLocation());
+ }
+ }
+ goto PastIdentifier;
+ } else if (Tok.is(tok::tilde)) {
+ // This should be a C++ destructor.
+ SourceLocation TildeLoc = ConsumeToken();
+ if (Tok.is(tok::identifier)) {
+ // FIXME: Inaccurate.
+ SourceLocation NameLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+ TypeResult Type = ParseClassName(EndLoc);
+ if (Type.isInvalid())
+ D.SetIdentifier(0, TildeLoc);
+ else
+ D.setDestructor(Type.get(), TildeLoc, NameLoc);
+ } else {
+ Diag(Tok, diag::err_expected_class_name);
+ D.SetIdentifier(0, TildeLoc);
+ }
+ goto PastIdentifier;
+ }
+
+ // If we reached this point, token is not identifier and not '~'.
+
+ if (afterCXXScope) {
+ Diag(Tok, diag::err_expected_unqualified_id);
+ D.SetIdentifier(0, Tok.getLocation());
+ D.setInvalidType(true);
+ goto PastIdentifier;
+ }
+ }
+ }
+
+ // If we reached this point, we are either in C/ObjC or the token didn't
+ // satisfy any of the C++-specific checks.
+ if (Tok.is(tok::identifier) && D.mayHaveIdentifier()) {
+ assert(!getLang().CPlusPlus &&
+ "There's a C++-specific check for tok::identifier above");
+ assert(Tok.getIdentifierInfo() && "Not an identifier?");
+ D.SetIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ } else if (Tok.is(tok::l_paren)) {
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ // Example: 'char (*X)' or 'int (*XX)(void)'
+ ParseParenDeclarator(D);
+ } else if (D.mayOmitIdentifier()) {
+ // This could be something simple like "int" (in which case the declarator
+ // portion is empty), if an abstract-declarator is allowed.
+ D.SetIdentifier(0, Tok.getLocation());
+ } else {
+ if (D.getContext() == Declarator::MemberContext)
+ Diag(Tok, diag::err_expected_member_name_or_semi)
+ << D.getDeclSpec().getSourceRange();
+ else if (getLang().CPlusPlus)
+ Diag(Tok, diag::err_expected_unqualified_id);
+ else
+ Diag(Tok, diag::err_expected_ident_lparen);
+ D.SetIdentifier(0, Tok.getLocation());
+ D.setInvalidType(true);
+ }
+
+ PastIdentifier:
+ assert(D.isPastIdentifier() &&
+         "Haven't passed the location of the identifier yet?");
+
+ while (1) {
+ if (Tok.is(tok::l_paren)) {
+      // The paren may be part of a C++ direct initializer, e.g. "int x(1);".
+ // In such a case, check if we actually have a function declarator; if it
+ // is not, the declarator has been fully parsed.
+ if (getLang().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ // When not in file scope, warn for ambiguous function declarators, just
+ // in case the author intended it as a variable definition.
+ bool warnIfAmbiguous = D.getContext() != Declarator::FileContext;
+ if (!isCXXFunctionDeclarator(warnIfAmbiguous))
+ break;
+ }
+ ParseFunctionDeclarator(ConsumeParen(), D);
+ } else if (Tok.is(tok::l_square)) {
+ ParseBracketDeclarator(D);
+ } else {
+ break;
+ }
+ }
+}
+
+/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
+/// only called before the identifier, so these are most likely just grouping
+/// parens for precedence. If we find that these are actually function
+/// parameter parens in an abstract-declarator, we call ParseFunctionDeclarator.
+///
+/// direct-declarator:
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
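+///
+/// For example, in 'int (*X)(void)' the paren after 'int' is a grouping
+/// paren around the pointer declarator, whereas in 'int(int)' it starts a
+/// parameter list for an abstract declarator.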
+///
+void Parser::ParseParenDeclarator(Declarator &D) {
+ SourceLocation StartLoc = ConsumeParen();
+ assert(!D.isPastIdentifier() && "Should be called before passing identifier");
+
+ // Eat any attributes before we look at whether this is a grouping or function
+ // declarator paren. If this is a grouping paren, the attribute applies to
+ // the type being built up, for example:
+ // int (__attribute__(()) *x)(long y)
+ // If this ends up not being a grouping paren, the attribute applies to the
+ // first argument, for example:
+ // int (__attribute__(()) int x)
+ // In either case, we need to eat any attributes to be able to determine what
+ // sort of paren this is.
+ //
+ AttributeList *AttrList = 0;
+ bool RequiresArg = false;
+ if (Tok.is(tok::kw___attribute)) {
+ AttrList = ParseAttributes();
+
+ // We require that the argument list (if this is a non-grouping paren) be
+ // present even if the attribute list was empty.
+ RequiresArg = true;
+ }
+ // Eat any Microsoft extensions.
+ while ((Tok.is(tok::kw___cdecl) || Tok.is(tok::kw___stdcall) ||
+ (Tok.is(tok::kw___fastcall))) && PP.getLangOptions().Microsoft)
+ ConsumeToken();
+
+  // If we haven't passed the identifier yet (or where the identifier would be
+ // stored, if this is an abstract declarator), then this is probably just
+ // grouping parens. However, if this could be an abstract-declarator, then
+ // this could also be the start of function arguments (consider 'void()').
+ bool isGrouping;
+
+ if (!D.mayOmitIdentifier()) {
+ // If this can't be an abstract-declarator, this *must* be a grouping
+ // paren, because we haven't seen the identifier yet.
+ isGrouping = true;
+ } else if (Tok.is(tok::r_paren) || // 'int()' is a function.
+ (getLang().CPlusPlus && Tok.is(tok::ellipsis)) || // C++ int(...)
+ isDeclarationSpecifier()) { // 'int(int)' is a function.
+ // This handles C99 6.7.5.3p11: in "typedef int X; void foo(X)", X is
+ // considered to be a type, not a K&R identifier-list.
+ isGrouping = false;
+ } else {
+ // Otherwise, this is a grouping paren, e.g. 'int (*X)' or 'int(X)'.
+ isGrouping = true;
+ }
+
+ // If this is a grouping paren, handle:
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ if (isGrouping) {
+ bool hadGroupingParens = D.hasGroupingParens();
+ D.setGroupingParens(true);
+ if (AttrList)
+ D.AddAttributes(AttrList, SourceLocation());
+
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ // Match the ')'.
+ SourceLocation Loc = MatchRHSPunctuation(tok::r_paren, StartLoc);
+
+ D.setGroupingParens(hadGroupingParens);
+ D.SetRangeEnd(Loc);
+ return;
+ }
+
+ // Okay, if this wasn't a grouping paren, it must be the start of a function
+ // argument list. Recognize that this declarator will never have an
+ // identifier (and remember where it would have been), then call into
+  // ParseFunctionDeclarator to handle the argument list.
+ D.SetIdentifier(0, Tok.getLocation());
+
+ ParseFunctionDeclarator(StartLoc, D, AttrList, RequiresArg);
+}
+
+/// ParseFunctionDeclarator - We are after the identifier and have parsed the
+/// declarator D up to a paren, which indicates that we are parsing function
+/// arguments.
+///
+/// If AttrList is non-null, then the caller parsed those attributes
+/// immediately after the open paren - they apply to the first parameter of
+/// the function. If RequiresArg is true, then the first argument of the
+/// function is required to be present and required to not be an identifier
+/// list.
+///
+/// This method also handles this portion of the grammar:
+/// parameter-type-list: [C99 6.7.5]
+/// parameter-list
+/// parameter-list ',' '...'
+///
+/// parameter-list: [C99 6.7.5]
+/// parameter-declaration
+/// parameter-list ',' parameter-declaration
+///
+/// parameter-declaration: [C99 6.7.5]
+/// declaration-specifiers declarator
+/// [C++] declaration-specifiers declarator '=' assignment-expression
+/// [GNU] declaration-specifiers declarator attributes
+/// declaration-specifiers abstract-declarator[opt]
+/// [C++] declaration-specifiers abstract-declarator[opt]
+/// '=' assignment-expression
+/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
+///
+/// For C++, after the parameter-list, it also parses "cv-qualifier-seq[opt]"
+/// and "exception-specification[opt]".
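+///
+/// For example, a C++ member declaration such as
+///   void f(int x, int y = 0) const throw(int);
+/// is handled here: the default argument for 'y' is cached for later
+/// parsing inside a class definition, and the trailing 'const' and
+/// exception specification are parsed after the ')'.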
+///
+void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
+ AttributeList *AttrList,
+ bool RequiresArg) {
+ // lparen is already consumed!
+ assert(D.isPastIdentifier() && "Should not call before identifier!");
+
+ // This parameter list may be empty.
+ if (Tok.is(tok::r_paren)) {
+ if (RequiresArg) {
+ Diag(Tok, diag::err_argument_required_after_attribute);
+ delete AttrList;
+ }
+
+ SourceLocation Loc = ConsumeParen(); // Eat the closing ')'.
+
+ // cv-qualifier-seq[opt].
+ DeclSpec DS;
+ bool hasExceptionSpec = false;
+ SourceLocation ThrowLoc;
+ bool hasAnyExceptionSpec = false;
+ llvm::SmallVector<TypeTy*, 2> Exceptions;
+ llvm::SmallVector<SourceRange, 2> ExceptionRanges;
+ if (getLang().CPlusPlus) {
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/);
+ if (!DS.getSourceRange().getEnd().isInvalid())
+ Loc = DS.getSourceRange().getEnd();
+
+ // Parse exception-specification[opt].
+ if (Tok.is(tok::kw_throw)) {
+ hasExceptionSpec = true;
+ ThrowLoc = Tok.getLocation();
+ ParseExceptionSpecification(Loc, Exceptions, ExceptionRanges,
+ hasAnyExceptionSpec);
+ assert(Exceptions.size() == ExceptionRanges.size() &&
+ "Produced different number of exception types and ranges.");
+ }
+ }
+
+ // Remember that we parsed a function type, and remember the attributes.
+ // int() -> no prototype, no '...'.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*prototype*/getLang().CPlusPlus,
+ /*variadic*/ false,
+ SourceLocation(),
+ /*arglist*/ 0, 0,
+ DS.getTypeQualifiers(),
+ hasExceptionSpec, ThrowLoc,
+ hasAnyExceptionSpec,
+ Exceptions.data(),
+ ExceptionRanges.data(),
+ Exceptions.size(),
+ LParenLoc, D),
+ Loc);
+ return;
+ }
+
+ // Alternatively, this parameter list may be an identifier list form for a
+ // K&R-style function: void foo(a,b,c)
+ if (!getLang().CPlusPlus && Tok.is(tok::identifier)) {
+ if (!TryAnnotateTypeOrScopeToken()) {
+ // K&R identifier lists can't have typedefs as identifiers, per
+ // C99 6.7.5.3p11.
+ if (RequiresArg) {
+ Diag(Tok, diag::err_argument_required_after_attribute);
+ delete AttrList;
+ }
+ // Identifier list. Note that '(' identifier-list ')' is only allowed for
+ // normal declarators, not for abstract-declarators.
+ return ParseFunctionDeclaratorIdentifierList(LParenLoc, D);
+ }
+ }
+
+ // Finally, a normal, non-empty parameter type list.
+
+ // Build up an array of information about the parsed arguments.
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+
+ bool IsVariadic = false;
+ SourceLocation EllipsisLoc;
+ while (1) {
+ if (Tok.is(tok::ellipsis)) {
+ IsVariadic = true;
+ EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
+ break;
+ }
+
+ SourceLocation DSStart = Tok.getLocation();
+
+ // Parse the declaration-specifiers.
+ DeclSpec DS;
+
+ // If the caller parsed attributes for the first argument, add them now.
+ if (AttrList) {
+ DS.AddAttributes(AttrList);
+ AttrList = 0; // Only apply the attributes to the first parameter.
+ }
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse the declarator. This is "PrototypeContext", because we must
+ // accept either 'declarator' or 'abstract-declarator' here.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+
+ // Parse GNU attributes, if present.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ ParmDecl.AddAttributes(AttrList, Loc);
+ }
+
+ // Remember this parsed parameter in ParamInfo.
+ IdentifierInfo *ParmII = ParmDecl.getIdentifier();
+
+ // DefArgToks is used when the parsing of default arguments needs
+ // to be delayed.
+ CachedTokens *DefArgToks = 0;
+
+ // If no parameter was specified, verify that *something* was specified,
+ // otherwise we have a missing type and identifier.
+ if (DS.isEmpty() && ParmDecl.getIdentifier() == 0 &&
+ ParmDecl.getNumTypeObjects() == 0) {
+ // Completely missing, emit error.
+ Diag(DSStart, diag::err_missing_param);
+ } else {
+ // Otherwise, we have something. Add it and let semantic analysis try
+ // to grok it and add the result to the ParamInfo we are building.
+
+ // Inform the actions module about the parameter declarator, so it gets
+ // added to the current scope.
+ DeclPtrTy Param = Actions.ActOnParamDeclarator(CurScope, ParmDecl);
+
+ // Parse the default argument, if any. We parse the default
+ // arguments in all dialects; the semantic analysis in
+ // ActOnParamDefaultArgument will reject the default argument in
+ // C.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = Tok.getLocation();
+
+ // Parse the default argument
+ if (D.getContext() == Declarator::MemberContext) {
+ // If we're inside a class definition, cache the tokens
+ // corresponding to the default argument. We'll actually parse
+ // them when we see the end of the class definition.
+ // FIXME: Templates will require something similar.
+ // FIXME: Can we use a smart pointer for Toks?
+ DefArgToks = new CachedTokens;
+
+ if (!ConsumeAndStoreUntil(tok::comma, tok::r_paren, *DefArgToks,
+ tok::semi, false)) {
+ delete DefArgToks;
+ DefArgToks = 0;
+ Actions.ActOnParamDefaultArgumentError(Param);
+ } else
+ Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc);
+ } else {
+ // Consume the '='.
+ ConsumeToken();
+
+ OwningExprResult DefArgResult(ParseAssignmentExpression());
+ if (DefArgResult.isInvalid()) {
+ Actions.ActOnParamDefaultArgumentError(Param);
+ SkipUntil(tok::comma, tok::r_paren, true, true);
+ } else {
+ // Inform the actions module about the default argument
+ Actions.ActOnParamDefaultArgument(Param, EqualLoc,
+ move(DefArgResult));
+ }
+ }
+ }
+
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ ParmDecl.getIdentifierLoc(), Param,
+ DefArgToks));
+ }
+
+ // If the next token is a comma, consume it and keep reading arguments.
+ if (Tok.isNot(tok::comma)) break;
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+ // Leave prototype scope.
+ PrototypeScope.Exit();
+
+ // If we have the closing ')', eat it.
+ SourceLocation Loc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ DeclSpec DS;
+ bool hasExceptionSpec = false;
+ SourceLocation ThrowLoc;
+ bool hasAnyExceptionSpec = false;
+ llvm::SmallVector<TypeTy*, 2> Exceptions;
+ llvm::SmallVector<SourceRange, 2> ExceptionRanges;
+ if (getLang().CPlusPlus) {
+ // Parse cv-qualifier-seq[opt].
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/);
+ if (!DS.getSourceRange().getEnd().isInvalid())
+ Loc = DS.getSourceRange().getEnd();
+
+ // Parse exception-specification[opt].
+ if (Tok.is(tok::kw_throw)) {
+ hasExceptionSpec = true;
+ ThrowLoc = Tok.getLocation();
+ ParseExceptionSpecification(Loc, Exceptions, ExceptionRanges,
+ hasAnyExceptionSpec);
+ assert(Exceptions.size() == ExceptionRanges.size() &&
+ "Produced different number of exception types and ranges.");
+ }
+ }
+
+ // Remember that we parsed a function type, and remember the attributes.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/true, IsVariadic,
+ EllipsisLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ DS.getTypeQualifiers(),
+ hasExceptionSpec, ThrowLoc,
+ hasAnyExceptionSpec,
+ Exceptions.data(),
+ ExceptionRanges.data(),
+ Exceptions.size(), LParenLoc, D),
+ Loc);
+}
+
+/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
+/// we found a K&R-style identifier list instead of a type argument list. The
+/// current token is known to be the first identifier in the list.
+///
+/// identifier-list: [C99 6.7.5]
+/// identifier
+/// identifier-list ',' identifier
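+///
+/// For example, in the K&R-style definition
+///   int add(a, b) int a, b; { return a + b; }
+/// this function is entered after 'add(' with 'a' as the current token.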
+///
+void Parser::ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc,
+ Declarator &D) {
+ // Build up an array of information about the parsed arguments.
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
+
+ // If there was no identifier specified for the declarator, either we are in
+ // an abstract-declarator, or we are in a parameter declarator which was found
+ // to be abstract. In abstract-declarators, identifier lists are not valid:
+ // diagnose this.
+ if (!D.getIdentifier())
+ Diag(Tok, diag::ext_ident_list_in_param);
+
+ // Tok is known to be the first identifier in the list. Remember this
+ // identifier in ParamInfo.
+ ParamsSoFar.insert(Tok.getIdentifierInfo());
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(Tok.getIdentifierInfo(),
+ Tok.getLocation(),
+ DeclPtrTy()));
+
+ ConsumeToken(); // eat the first identifier.
+
+ while (Tok.is(tok::comma)) {
+ // Eat the comma.
+ ConsumeToken();
+
+ // If this isn't an identifier, report the error and skip until ')'.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ IdentifierInfo *ParmII = Tok.getIdentifierInfo();
+
+ // Reject 'typedef int y; int test(x, y)', but continue parsing.
+ if (Actions.getTypeName(*ParmII, Tok.getLocation(), CurScope))
+ Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
+
+ // Verify that the argument identifier has not already been mentioned.
+ if (!ParamsSoFar.insert(ParmII)) {
+ Diag(Tok, diag::err_param_redefinition) << ParmII;
+ } else {
+ // Remember this identifier in ParamInfo.
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ Tok.getLocation(),
+ DeclPtrTy()));
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+ }
+
+ // If we have the closing ')', eat it and we're done.
+ SourceLocation RLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ // Remember that we parsed a function type, and remember the attributes. This
+ // function type is always a K&R style function type, which is not varargs and
+ // has no prototype.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*proto*/false, /*varargs*/false,
+ SourceLocation(),
+ &ParamInfo[0], ParamInfo.size(),
+ /*TypeQuals*/0,
+ /*exception*/false,
+ SourceLocation(), false, 0, 0, 0,
+ LParenLoc, D),
+ RLoc);
+}
+
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
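+///
+/// For example, 'int a[10]', 'int a[]', 'void f(int a[static 4])', and the
+/// unspecified-size VLA form 'int a[*]' are all handled here.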
+void Parser::ParseBracketDeclarator(Declarator &D) {
+ SourceLocation StartLoc = ConsumeBracket();
+
+  // C array syntax has many features; by far the most common are [] and [4].
+ // This code does a fast path to handle some of the most obvious cases.
+ if (Tok.getKind() == tok::r_square) {
+ SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
+ // Remember that we parsed the empty array type.
+ OwningExprResult NumElements(Actions);
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, 0, StartLoc),
+ EndLoc);
+ return;
+ } else if (Tok.getKind() == tok::numeric_constant &&
+ GetLookAheadToken(1).is(tok::r_square)) {
+ // [4] is very common. Parse the numeric constant expression.
+ OwningExprResult ExprRes(Actions.ActOnNumericConstant(Tok));
+ ConsumeToken();
+
+ SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
+
+ // If there was an error parsing the assignment-expression, recover.
+ if (ExprRes.isInvalid())
+ ExprRes.release(); // Deallocate expr, just use [].
+
+    // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, 0,
+ ExprRes.release(), StartLoc),
+ EndLoc);
+ return;
+ }
+
+ // If valid, this location is the position where we read the 'static' keyword.
+ SourceLocation StaticLoc;
+ if (Tok.is(tok::kw_static))
+ StaticLoc = ConsumeToken();
+
+ // If there is a type-qualifier-list, read it now.
+ // Type qualifiers in an array subscript are a C99 feature.
+ DeclSpec DS;
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/);
+
+ // If we haven't already read 'static', check to see if there is one after the
+ // type-qualifier-list.
+ if (!StaticLoc.isValid() && Tok.is(tok::kw_static))
+ StaticLoc = ConsumeToken();
+
+ // Handle "direct-declarator [ type-qual-list[opt] * ]".
+ bool isStar = false;
+ OwningExprResult NumElements(Actions);
+
+ // Handle the case where we have '[*]' as the array size. However, a leading
+ // star could be the start of an expression, for example 'X[*p + 4]'. Verify
+  // that the token after the star is a ']'.  Since stars in arrays are
+ // infrequent, use of lookahead is not costly here.
+ if (Tok.is(tok::star) && GetLookAheadToken(1).is(tok::r_square)) {
+ ConsumeToken(); // Eat the '*'.
+
+ if (StaticLoc.isValid()) {
+ Diag(StaticLoc, diag::err_unspecified_vla_size_with_static);
+ StaticLoc = SourceLocation(); // Drop the static.
+ }
+ isStar = true;
+ } else if (Tok.isNot(tok::r_square)) {
+ // Note, in C89, this production uses the constant-expr production instead
+ // of assignment-expr. The only difference is that assignment-expr allows
+ // things like '=' and '*='. Sema rejects these in C89 mode because they
+ // are not i-c-e's, so we don't need to distinguish between the two here.
+
+ // Parse the assignment-expression now.
+ NumElements = ParseAssignmentExpression();
+ }
+
+ // If there was an error parsing the assignment-expression, recover.
+ if (NumElements.isInvalid()) {
+ D.setInvalidType(true);
+ // If the expression was invalid, skip it.
+ SkipUntil(tok::r_square);
+ return;
+ }
+
+ SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
+
+  // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
+ StaticLoc.isValid(), isStar,
+ NumElements.release(), StartLoc),
+ EndLoc);
+}
+
+/// [GNU] typeof-specifier:
+/// typeof ( expression )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
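+///
+/// For example:
+///   typeof(x) y;      // operand is an expression
+///   typeof(int *) p;  // operand is a type-name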
+///
+void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw_typeof) && "Not a typeof specifier");
+ Token OpTok = Tok;
+ SourceLocation StartLoc = ConsumeToken();
+
+ bool isCastExpr;
+ TypeTy *CastTy;
+ SourceRange CastRange;
+ OwningExprResult Operand = ParseExprAfterTypeofSizeofAlignof(OpTok,
+ isCastExpr,
+ CastTy,
+ CastRange);
+
+ if (CastRange.getEnd().isInvalid())
+ // FIXME: Not accurate, the range gets one token more than it should.
+ DS.SetRangeEnd(Tok.getLocation());
+ else
+ DS.SetRangeEnd(CastRange.getEnd());
+
+ if (isCastExpr) {
+ if (!CastTy) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = 0;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofType, StartLoc, PrevSpec,
+ CastTy))
+ Diag(StartLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
+ return;
+ }
+
+  // If we get here, the operand to the typeof was an expression.
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = 0;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofExpr, StartLoc, PrevSpec,
+ Operand.release()))
+ Diag(StartLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
+}
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
new file mode 100644
index 0000000..809dc10
--- /dev/null
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -0,0 +1,1292 @@
+//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "ExtensionRAIIObject.h"
+using namespace clang;
+
+/// ParseNamespace - We know that the current token is a namespace keyword. This
+/// may either be a top level namespace or a block-level namespace alias.
+///
+/// namespace-definition: [C++ 7.3: basic.namespace]
+/// named-namespace-definition
+/// unnamed-namespace-definition
+///
+/// unnamed-namespace-definition:
+/// 'namespace' attributes[opt] '{' namespace-body '}'
+///
+/// named-namespace-definition:
+/// original-namespace-definition
+/// extension-namespace-definition
+///
+/// original-namespace-definition:
+/// 'namespace' identifier attributes[opt] '{' namespace-body '}'
+///
+/// extension-namespace-definition:
+/// 'namespace' original-namespace-name '{' namespace-body '}'
+///
+/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
+/// 'namespace' identifier '=' qualified-namespace-specifier ';'
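+///
+/// For example:
+/// @code
+///   namespace A { int x; }    // named-namespace-definition
+///   namespace { int y; }      // unnamed-namespace-definition
+///   namespace B = A;          // namespace-alias-definition
+/// @endcode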
+///
+Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
+ SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
+
+ SourceLocation IdentLoc;
+ IdentifierInfo *Ident = 0;
+
+ if (Tok.is(tok::identifier)) {
+ Ident = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken(); // eat the identifier.
+ }
+
+  // Read attributes, if present.
+ Action::AttrTy *AttrList = 0;
+ if (Tok.is(tok::kw___attribute))
+ // FIXME: save these somewhere.
+ AttrList = ParseAttributes();
+
+ if (Tok.is(tok::equal))
+ // FIXME: Verify no attributes were present.
+ return ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
+
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, Ident ? diag::err_expected_lbrace :
+ diag::err_expected_ident_lbrace);
+ return DeclPtrTy();
+ }
+
+ SourceLocation LBrace = ConsumeBrace();
+
+ // Enter a scope for the namespace.
+ ParseScope NamespaceScope(this, Scope::DeclScope);
+
+ DeclPtrTy NamespcDecl =
+ Actions.ActOnStartNamespaceDef(CurScope, IdentLoc, Ident, LBrace);
+
+ PrettyStackTraceActionsDecl CrashInfo(NamespcDecl, NamespaceLoc, Actions,
+ PP.getSourceManager(),
+ "parsing namespace");
+
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof))
+ ParseExternalDeclaration();
+
+ // Leave the namespace scope.
+ NamespaceScope.Exit();
+
+ SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBrace);
+ Actions.ActOnFinishNamespaceDef(NamespcDecl, RBraceLoc);
+
+ DeclEnd = RBraceLoc;
+ return NamespcDecl;
+}
+
+/// ParseNamespaceAlias - Parse the part after the '=' in a namespace
+/// alias definition.
+///
+Parser::DeclPtrTy Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::equal) && "Not equal token");
+
+ ConsumeToken(); // eat the '='.
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS);
+
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+ // Skip to end of the definition and eat the ';'.
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+
+ // Parse identifier.
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ // Eat the ';'.
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ "namespace name", tok::semi);
+
+ return Actions.ActOnNamespaceAliasDef(CurScope, NamespaceLoc, AliasLoc, Alias,
+ SS, IdentLoc, Ident);
+}
+
+/// ParseLinkage - We know that the current token is a string_literal
+/// and just before that, that extern was seen.
+///
+/// linkage-specification: [C++ 7.5p2: dcl.link]
+/// 'extern' string-literal '{' declaration-seq[opt] '}'
+/// 'extern' string-literal declaration
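+///
+/// For example:
+/// @code
+///   extern "C" void f();             // single-declaration form
+///   extern "C" { void g(); int x; }  // braced declaration-seq form
+/// @endcode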
+///
+Parser::DeclPtrTy Parser::ParseLinkage(unsigned Context) {
+ assert(Tok.is(tok::string_literal) && "Not a string literal!");
+ llvm::SmallVector<char, 8> LangBuffer;
+ // LangBuffer is guaranteed to be big enough.
+ LangBuffer.resize(Tok.getLength());
+ const char *LangBufPtr = &LangBuffer[0];
+ unsigned StrSize = PP.getSpelling(Tok, LangBufPtr);
+
+ SourceLocation Loc = ConsumeStringToken();
+
+ ParseScope LinkageScope(this, Scope::DeclScope);
+ DeclPtrTy LinkageSpec
+ = Actions.ActOnStartLinkageSpecification(CurScope,
+ /*FIXME: */SourceLocation(),
+ Loc, LangBufPtr, StrSize,
+ Tok.is(tok::l_brace)? Tok.getLocation()
+ : SourceLocation());
+
+ if (Tok.isNot(tok::l_brace)) {
+ ParseDeclarationOrFunctionDefinition();
+ return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec,
+ SourceLocation());
+ }
+
+ SourceLocation LBrace = ConsumeBrace();
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ ParseExternalDeclaration();
+ }
+
+ SourceLocation RBrace = MatchRHSPunctuation(tok::r_brace, LBrace);
+ return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec, RBrace);
+}
+
+/// ParseUsingDirectiveOrDeclaration - Parse a C++ using-directive or
+/// using-declaration. Assumes that the current token is 'using'.
+Parser::DeclPtrTy Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::kw_using) && "Not using token");
+
+ // Eat 'using'.
+ SourceLocation UsingLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_namespace))
+ // Next token after 'using' is 'namespace' so it must be using-directive
+ return ParseUsingDirective(Context, UsingLoc, DeclEnd);
+
+ // Otherwise, it must be using-declaration.
+ return ParseUsingDeclaration(Context, UsingLoc, DeclEnd);
+}
+
+/// ParseUsingDirective - Parse C++ using-directive, assumes
+/// that current token is 'namespace' and 'using' was already parsed.
+///
+/// using-directive: [C++ 7.3.p4: namespace.udir]
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name ;
+/// [GNU] using-directive:
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name attributes[opt] ;
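+///
+/// For example:
+/// @code
+///   using namespace std;
+///   using namespace ::A::B;   // with leading '::' and nested-name-specifier
+/// @endcode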
+///
+Parser::DeclPtrTy Parser::ParseUsingDirective(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::kw_namespace) && "Not 'namespace' token");
+
+ // Eat 'namespace'.
+ SourceLocation NamespcLoc = ConsumeToken();
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS);
+
+ AttributeList *AttrList = 0;
+ IdentifierInfo *NamespcName = 0;
+ SourceLocation IdentLoc = SourceLocation();
+
+ // Parse namespace-name.
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+ // If there was invalid namespace name, skip to end of decl, and eat ';'.
+ SkipUntil(tok::semi);
+ // FIXME: Are there cases, when we would like to call ActOnUsingDirective?
+ return DeclPtrTy();
+ }
+
+ // Parse identifier.
+ NamespcName = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken();
+
+ // Parse (optional) attributes (most likely GNU strong-using extension).
+ if (Tok.is(tok::kw___attribute))
+ AttrList = ParseAttributes();
+
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ AttrList ? "attributes list" : "namespace name", tok::semi);
+
+ return Actions.ActOnUsingDirective(CurScope, UsingLoc, NamespcLoc, SS,
+ IdentLoc, NamespcName, AttrList);
+}
+
+/// ParseUsingDeclaration - Parse C++ using-declaration. Assumes that
+/// 'using' was already seen.
+///
+/// using-declaration: [C++ 7.3.p3: namespace.udecl]
+/// 'using' 'typename'[opt] ::[opt] nested-name-specifier
+/// unqualified-id [TODO]
+/// 'using' :: unqualified-id [TODO]
+///
+Parser::DeclPtrTy Parser::ParseUsingDeclaration(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd) {
+ assert(false && "Not implemented");
+ // FIXME: Implement parsing.
+ return DeclPtrTy();
+}
+
+/// ParseStaticAssertDeclaration - Parse C++0x static_assert-declaration.
+///
+/// static_assert-declaration:
+/// static_assert ( constant-expression , string-literal ) ;
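+///
+/// For example (C++0x):
+/// @code
+///   static_assert(sizeof(long) >= 4, "long is too small");
+/// @endcode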
+///
+Parser::DeclPtrTy Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
+ assert(Tok.is(tok::kw_static_assert) && "Not a static_assert declaration");
+ SourceLocation StaticAssertLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen);
+ return DeclPtrTy();
+ }
+
+ SourceLocation LParenLoc = ConsumeParen();
+
+ OwningExprResult AssertExpr(ParseConstantExpression());
+ if (AssertExpr.isInvalid()) {
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::semi))
+ return DeclPtrTy();
+
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok, diag::err_expected_string_literal);
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+
+ OwningExprResult AssertMessage(ParseStringLiteralExpression());
+ if (AssertMessage.isInvalid())
+ return DeclPtrTy();
+
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_static_assert);
+
+ return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc, move(AssertExpr),
+ move(AssertMessage));
+}
+
+/// ParseClassName - Parse a C++ class-name, which names a class. Note
+/// that we only check that the result names a type; semantic analysis
+/// will need to verify that the type names a class. The result is
+/// either a type or NULL, depending on whether a type name was
+/// found.
+///
+/// class-name: [C++ 9.1]
+/// identifier
+/// simple-template-id
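+///
+/// For example, in 'struct D : Base, vector<int> { };' both 'Base'
+/// (identifier) and 'vector<int>' (simple-template-id) are parsed as
+/// class-names in the base-specifier-list.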
+///
+Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
+ const CXXScopeSpec *SS) {
+ // Check whether we have a template-id that names a type.
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ if (TemplateId->Kind == TNK_Type_template) {
+ AnnotateTemplateIdTokenAsType(SS);
+
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ TypeTy *Type = Tok.getAnnotationValue();
+ EndLocation = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+
+ if (Type)
+ return Type;
+ return true;
+ }
+
+ // Fall through to produce an error below.
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_class_name);
+ return true;
+ }
+
+ // We have an identifier; check whether it is actually a type.
+ TypeTy *Type = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), CurScope, SS);
+ if (!Type) {
+ Diag(Tok, diag::err_expected_class_name);
+ return true;
+ }
+
+ // Consume the identifier.
+ EndLocation = ConsumeToken();
+ return Type;
+}
+
+/// ParseClassSpecifier - Parse a C++ class-specifier [C++ class] or
+/// elaborated-type-specifier [C++ dcl.type.elab]; we can't tell which
+/// until we reach the start of a definition or see a token that
+/// cannot start a definition.
+///
+/// class-specifier: [C++ class]
+/// class-head '{' member-specification[opt] '}'
+/// class-head '{' member-specification[opt] '}' attributes[opt]
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+/// [GNU] class-key attributes[opt] identifier[opt] base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier
+/// identifier base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier[opt]
+/// simple-template-id base-clause[opt]
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// elaborated-type-specifier: [C++ dcl.type.elab]
+/// class-key ::[opt] nested-name-specifier[opt] identifier
+/// class-key ::[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+///
+/// Note that the C++ class-specifier and elaborated-type-specifier,
+/// together, subsume the C99 struct-or-union-specifier:
+///
+/// struct-or-union-specifier: [C99 6.7.2.1]
+/// struct-or-union identifier[opt] '{' struct-contents '}'
+/// struct-or-union identifier
+/// [GNU] struct-or-union attributes[opt] identifier[opt] '{' struct-contents
+/// '}' attributes[opt]
+/// [GNU] struct-or-union attributes[opt] identifier
+/// struct-or-union:
+/// 'struct'
+/// 'union'
+void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
+ SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS) {
+ DeclSpec::TST TagType;
+ if (TagTokKind == tok::kw_struct)
+ TagType = DeclSpec::TST_struct;
+ else if (TagTokKind == tok::kw_class)
+ TagType = DeclSpec::TST_class;
+ else {
+ assert(TagTokKind == tok::kw_union && "Not a class specifier");
+ TagType = DeclSpec::TST_union;
+ }
+
+ AttributeList *Attr = 0;
+ // If attributes exist after tag, parse them.
+ if (Tok.is(tok::kw___attribute))
+ Attr = ParseAttributes();
+
+ // If declspecs exist after tag, parse them.
+ if (Tok.is(tok::kw___declspec) && PP.getLangOptions().Microsoft)
+ FuzzyParseMicrosoftDeclSpec();
+
+ // Parse the (optional) nested-name-specifier.
+ CXXScopeSpec SS;
+ if (getLang().CPlusPlus && ParseOptionalCXXScopeSpecifier(SS))
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
+ Diag(Tok, diag::err_expected_ident);
+
+ // Parse the (optional) class name or simple-template-id.
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ TemplateIdAnnotation *TemplateId = 0;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateId = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ NameLoc = ConsumeToken();
+
+ if (TemplateId->Kind != TNK_Type_template) {
+ // The template-name in the simple-template-id refers to
+ // something other than a class template. Give an appropriate
+ // error message and skip to the ';'.
+ SourceRange Range(NameLoc);
+ if (SS.isNotEmpty())
+ Range.setBegin(SS.getBeginLoc());
+
+ Diag(TemplateId->LAngleLoc, diag::err_template_spec_syntax_non_template)
+ << Name << static_cast<int>(TemplateId->Kind) << Range;
+
+ DS.SetTypeSpecError();
+ SkipUntil(tok::semi, false, true);
+ TemplateId->Destroy();
+ return;
+ }
+ }
+
+ // There are three options here. If we have 'struct foo;', then
+ // this is a forward declaration. If we have 'struct foo {...' or
+ // 'struct foo :...' then this is a definition. Otherwise we have
+ // something like 'struct foo xyz', a reference.
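+  // For example:
+  //   struct foo;          -> TK_Declaration
+  //   struct foo { ... };  -> TK_Definition
+  //   struct foo xyz;      -> TK_Reference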
+ Action::TagKind TK;
+ if (Tok.is(tok::l_brace) || (getLang().CPlusPlus && Tok.is(tok::colon)))
+ TK = Action::TK_Definition;
+ else if (Tok.is(tok::semi) && !DS.isFriendSpecified())
+ TK = Action::TK_Declaration;
+ else
+ TK = Action::TK_Reference;
+
+ if (!Name && !TemplateId && TK != Action::TK_Definition) {
+ // We have a declaration or reference to an anonymous class.
+ Diag(StartLoc, diag::err_anon_type_definition)
+ << DeclSpec::getSpecifierName(TagType);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+
+ if (TemplateId)
+ TemplateId->Destroy();
+ return;
+ }
+
+ // Create the tag portion of the class or class template.
+ Action::DeclResult TagOrTempResult;
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+
+ // FIXME: When TK == TK_Reference and we have a template-id, we need
+ // to turn that template-id into a type.
+
+ bool Owned = false;
+ if (TemplateId && TK != Action::TK_Reference) {
+ // Explicit specialization, class template partial specialization,
+ // or explicit instantiation.
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->getTemplateArgIsType(),
+ TemplateId->NumArgs);
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TK == Action::TK_Declaration) {
+ // This is an explicit instantiation of a class template.
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(CurScope,
+ TemplateInfo.TemplateLoc,
+ TagType,
+ StartLoc,
+ SS,
+ TemplateTy::make(TemplateId->Template),
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->getTemplateArgLocations(),
+ TemplateId->RAngleLoc,
+ Attr);
+ } else {
+ // This is an explicit specialization or a class template
+ // partial specialization.
+ TemplateParameterLists FakedParamLists;
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // This looks like an explicit instantiation, because we have
+ // something like
+ //
+ // template class Foo<X>
+ //
+ // but it actually has a definition. Most likely, this was
+ // meant to be an explicit specialization, but the user forgot
+ // the '<>' after 'template'.
+ assert(TK == Action::TK_Definition && "Expected a definition here");
+
+ SourceLocation LAngleLoc
+ = PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << CodeModificationHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Create a fake template parameter list that contains only
+ // "template<>", so that we treat this construct as a class
+ // template specialization.
+ FakedParamLists.push_back(
+ Actions.ActOnTemplateParameterList(0, SourceLocation(),
+ TemplateInfo.TemplateLoc,
+ LAngleLoc,
+ 0, 0,
+ LAngleLoc));
+ TemplateParams = &FakedParamLists;
+ }
+
+ // Build the class template specialization.
+ TagOrTempResult
+ = Actions.ActOnClassTemplateSpecialization(CurScope, TagType, TK,
+ StartLoc, SS,
+ TemplateTy::make(TemplateId->Template),
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->getTemplateArgLocations(),
+ TemplateId->RAngleLoc,
+ Attr,
+ Action::MultiTemplateParamsArg(Actions,
+ TemplateParams? &(*TemplateParams)[0] : 0,
+ TemplateParams? TemplateParams->size() : 0));
+ }
+ TemplateId->Destroy();
+ } else if (TemplateParams && TK != Action::TK_Reference) {
+ // Class template declaration or definition.
+ TagOrTempResult = Actions.ActOnClassTemplate(CurScope, TagType, TK,
+ StartLoc, SS, Name, NameLoc,
+ Attr,
+ Action::MultiTemplateParamsArg(Actions,
+ &(*TemplateParams)[0],
+ TemplateParams->size()),
+ AS);
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TK == Action::TK_Declaration) {
+ // Explicit instantiation of a member of a class template
+ // specialization, e.g.,
+ //
+ // template struct Outer<int>::Inner;
+ //
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(CurScope,
+ TemplateInfo.TemplateLoc,
+ TagType, StartLoc, SS, Name,
+ NameLoc, Attr);
+ } else {
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TK == Action::TK_Definition) {
+ // FIXME: Diagnose this particular error.
+ }
+
+ // Declaration or definition of a class type
+ TagOrTempResult = Actions.ActOnTag(CurScope, TagType, TK, StartLoc, SS,
+ Name, NameLoc, Attr, AS, Owned);
+ }
+
+ // Parse the optional base clause (C++ only).
+ if (getLang().CPlusPlus && Tok.is(tok::colon))
+ ParseBaseClause(TagOrTempResult.get());
+
+ // If there is a body, parse it and inform the actions module.
+ if (Tok.is(tok::l_brace))
+ if (getLang().CPlusPlus)
+ ParseCXXMemberSpecification(StartLoc, TagType, TagOrTempResult.get());
+ else
+ ParseStructUnionBody(StartLoc, TagType, TagOrTempResult.get());
+ else if (TK == Action::TK_Definition) {
+ // FIXME: Complain that we have a base-specifier list but no
+ // definition.
+ Diag(Tok, diag::err_expected_lbrace);
+ }
+
+ const char *PrevSpec = 0;
+ if (TagOrTempResult.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (DS.SetTypeSpecType(TagType, StartLoc, PrevSpec,
+ TagOrTempResult.get().getAs<void>(), Owned))
+ Diag(StartLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
+
+ if (DS.isFriendSpecified())
+ Actions.ActOnFriendDecl(CurScope, DS.getFriendSpecLoc(),
+ TagOrTempResult.get());
+}
+
+/// ParseBaseClause - Parse the base-clause of a C++ class [C++ class.derived].
+///
+/// base-clause : [C++ class.derived]
+/// ':' base-specifier-list
+/// base-specifier-list:
+/// base-specifier '...'[opt]
+/// base-specifier-list ',' base-specifier '...'[opt]
+void Parser::ParseBaseClause(DeclPtrTy ClassDecl) {
+ assert(Tok.is(tok::colon) && "Not a base clause");
+ ConsumeToken();
+
+ // Build up an array of parsed base specifiers.
+ llvm::SmallVector<BaseTy *, 8> BaseInfo;
+
+ while (true) {
+ // Parse a base-specifier.
+ BaseResult Result = ParseBaseSpecifier(ClassDecl);
+ if (Result.isInvalid()) {
+ // Skip the rest of this base specifier, up until the comma or
+ // opening brace.
+ SkipUntil(tok::comma, tok::l_brace, true, true);
+ } else {
+ // Add this to our array of base specifiers.
+ BaseInfo.push_back(Result.get());
+ }
+
+ // If the next token is a comma, consume it and keep reading
+ // base-specifiers.
+ if (Tok.isNot(tok::comma)) break;
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+ // Attach the base specifiers
+ Actions.ActOnBaseSpecifiers(ClassDecl, BaseInfo.data(), BaseInfo.size());
+}
+
+/// ParseBaseSpecifier - Parse a C++ base-specifier. A base-specifier is
+/// one entry in the base class list of a class specifier, for example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+///
+/// base-specifier: [C++ class.derived]
+/// ::[opt] nested-name-specifier[opt] class-name
+/// 'virtual' access-specifier[opt] ::[opt] nested-name-specifier[opt]
+/// class-name
+/// access-specifier 'virtual'[opt] ::[opt] nested-name-specifier[opt]
+/// class-name
+Parser::BaseResult Parser::ParseBaseSpecifier(DeclPtrTy ClassDecl) {
+ bool IsVirtual = false;
+ SourceLocation StartLoc = Tok.getLocation();
+
+ // Parse the 'virtual' keyword.
+ if (Tok.is(tok::kw_virtual)) {
+ ConsumeToken();
+ IsVirtual = true;
+ }
+
+ // Parse an (optional) access specifier.
+ AccessSpecifier Access = getAccessSpecifierIfPresent();
+ if (Access)
+ ConsumeToken();
+
+ // Parse the 'virtual' keyword (again!), in case it came after the
+ // access specifier.
+ if (Tok.is(tok::kw_virtual)) {
+ SourceLocation VirtualLoc = ConsumeToken();
+ if (IsVirtual) {
+ // Complain about duplicate 'virtual'
+ Diag(VirtualLoc, diag::err_dup_virtual)
+ << CodeModificationHint::CreateRemoval(SourceRange(VirtualLoc));
+ }
+
+ IsVirtual = true;
+ }
+
+ // Parse optional '::' and optional nested-name-specifier.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS);
+
+ // The location of the base class itself.
+ SourceLocation BaseLoc = Tok.getLocation();
+
+ // Parse the class-name.
+ SourceLocation EndLocation;
+ TypeResult BaseType = ParseClassName(EndLocation, &SS);
+ if (BaseType.isInvalid())
+ return true;
+
+ // Find the complete source range for the base-specifier.
+ SourceRange Range(StartLoc, EndLocation);
+
+ // Notify semantic analysis that we have parsed a complete
+ // base-specifier.
+ return Actions.ActOnBaseSpecifier(ClassDecl, Range, IsVirtual, Access,
+ BaseType.get(), BaseLoc);
+}
+
+/// getAccessSpecifierIfPresent - Determine whether the next token is
+/// a C++ access-specifier.
+///
+/// access-specifier: [C++ class.derived]
+/// 'private'
+/// 'protected'
+/// 'public'
+AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
+ switch (Tok.getKind()) {
+ default: return AS_none;
+ case tok::kw_private: return AS_private;
+ case tok::kw_protected: return AS_protected;
+ case tok::kw_public: return AS_public;
+ }
+}
+
+/// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
+///
+/// member-declaration:
+/// decl-specifier-seq[opt] member-declarator-list[opt] ';'
+/// function-definition ';'[opt]
+/// ::[opt] nested-name-specifier template[opt] unqualified-id ';'[TODO]
+/// using-declaration [TODO]
+/// [C++0x] static_assert-declaration
+/// template-declaration
+/// [GNU] '__extension__' member-declaration
+///
+/// member-declarator-list:
+/// member-declarator
+/// member-declarator-list ',' member-declarator
+///
+/// member-declarator:
+/// declarator pure-specifier[opt]
+/// declarator constant-initializer[opt]
+/// identifier[opt] ':' constant-expression
+///
+/// pure-specifier:
+/// '= 0'
+///
+/// constant-initializer:
+/// '=' constant-expression
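+///
+/// For example:
+/// @code
+///   struct S {
+///     int bits : 4;             // identifier[opt] ':' constant-expression
+///     virtual void f() = 0;     // declarator pure-specifier
+///     static const int N = 42;  // declarator constant-initializer
+///   };
+/// @endcode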
+///
+void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS) {
+ // static_assert-declaration
+ if (Tok.is(tok::kw_static_assert)) {
+ SourceLocation DeclEnd;
+ ParseStaticAssertDeclaration(DeclEnd);
+ return;
+ }
+
+ if (Tok.is(tok::kw_template)) {
+ SourceLocation DeclEnd;
+ ParseDeclarationStartingWithTemplate(Declarator::MemberContext, DeclEnd,
+ AS);
+ return;
+ }
+
+ // Handle: member-declaration ::= '__extension__' member-declaration
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseCXXClassMemberDeclaration(AS);
+ }
+
+ SourceLocation DSStart = Tok.getLocation();
+ // decl-specifier-seq:
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS);
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ // C++ 9.2p7: The member-declarator-list can be omitted only after a
+ // class-specifier or an enum-specifier or in a friend declaration.
+ // FIXME: Friend declarations.
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_struct:
+ case DeclSpec::TST_union:
+ case DeclSpec::TST_class:
+ case DeclSpec::TST_enum:
+ Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
+ return;
+ default:
+ Diag(DSStart, diag::err_no_declarators);
+ return;
+ }
+ }
+
+ Declarator DeclaratorInfo(DS, Declarator::MemberContext);
+
+ if (Tok.isNot(tok::colon)) {
+ // Parse the first declarator.
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+ }
+
+ // function-definition:
+ if (Tok.is(tok::l_brace)
+ || (DeclaratorInfo.isFunctionDeclarator() &&
+ (Tok.is(tok::colon) || Tok.is(tok::kw_try)))) {
+ if (!DeclaratorInfo.isFunctionDeclarator()) {
+ Diag(Tok, diag::err_func_def_no_params);
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, true);
+ return;
+ }
+
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+ // This recovery skips the entire function body. It would be nice
+ // to simply call ParseCXXInlineMethodDef() below, however Sema
+ // assumes the declarator represents a function, not a typedef.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, true);
+ return;
+ }
+
+ ParseCXXInlineMethodDef(AS, DeclaratorInfo);
+ return;
+ }
+ }
+
+ // member-declarator-list:
+ // member-declarator
+ // member-declarator-list ',' member-declarator
+
+ llvm::SmallVector<DeclPtrTy, 8> DeclsInGroup;
+ OwningExprResult BitfieldSize(Actions);
+ OwningExprResult Init(Actions);
+ bool Deleted = false;
+
+ while (1) {
+
+ // member-declarator:
+ // declarator pure-specifier[opt]
+ // declarator constant-initializer[opt]
+ // identifier[opt] ':' constant-expression
+
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ BitfieldSize = ParseConstantExpression();
+ if (BitfieldSize.isInvalid())
+ SkipUntil(tok::comma, true, true);
+ }
+
+ // pure-specifier:
+ // '= 0'
+ //
+ // constant-initializer:
+ // '=' constant-expression
+ //
+ // defaulted/deleted function-definition:
+ // '=' 'default' [TODO]
+ // '=' 'delete'
+
+ if (Tok.is(tok::equal)) {
+ ConsumeToken();
+ if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) {
+ ConsumeToken();
+ Deleted = true;
+ } else {
+ Init = ParseInitializer();
+ if (Init.isInvalid())
+ SkipUntil(tok::comma, true, true);
+ }
+ }
+
+ // If attributes exist after the declarator, parse them.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ DeclaratorInfo.AddAttributes(AttrList, Loc);
+ }
+
+ // NOTE: If Sema is the Action module and declarator is an instance field,
+    // this call will *not* return the created decl; it will return null.
+ // See Sema::ActOnCXXMemberDeclarator for details.
+ DeclPtrTy ThisDecl = Actions.ActOnCXXMemberDeclarator(CurScope, AS,
+ DeclaratorInfo,
+ BitfieldSize.release(),
+ Init.release(),
+ Deleted);
+ if (ThisDecl)
+ DeclsInGroup.push_back(ThisDecl);
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef) {
+ // We just declared a member function. If this member function
+ // has any default arguments, we'll need to parse them later.
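+      // For example (illustrative), given
+      //   struct S { void f(int x = g()); static int g(); };
+      // the tokens of 'g()' are saved and parsed only after the class is
+      // complete, so the call to the later-declared 'g' resolves correctly.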
+ LateParsedMethodDeclaration *LateMethod = 0;
+ DeclaratorChunk::FunctionTypeInfo &FTI
+ = DeclaratorInfo.getTypeObject(0).Fun;
+ for (unsigned ParamIdx = 0; ParamIdx < FTI.NumArgs; ++ParamIdx) {
+ if (LateMethod || FTI.ArgInfo[ParamIdx].DefaultArgTokens) {
+ if (!LateMethod) {
+ // Push this method onto the stack of late-parsed method
+ // declarations.
+ getCurrentClass().MethodDecls.push_back(
+ LateParsedMethodDeclaration(ThisDecl));
+ LateMethod = &getCurrentClass().MethodDecls.back();
+
+ // Add all of the parameters prior to this one (they don't
+ // have default arguments).
+ LateMethod->DefaultArgs.reserve(FTI.NumArgs);
+ for (unsigned I = 0; I < ParamIdx; ++I)
+ LateMethod->DefaultArgs.push_back(
+                    LateParsedDefaultArgument(FTI.ArgInfo[I].Param));
+ }
+
+          // Add this parameter to the list of parameters (it may or may
+          // not have a default argument).
+ LateMethod->DefaultArgs.push_back(
+ LateParsedDefaultArgument(FTI.ArgInfo[ParamIdx].Param,
+ FTI.ArgInfo[ParamIdx].DefaultArgTokens));
+ }
+ }
+ }
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ // Consume the comma.
+ ConsumeToken();
+
+ // Parse the next declarator.
+ DeclaratorInfo.clear();
+ BitfieldSize = 0;
+ Init = 0;
+ Deleted = false;
+
+ // Attributes are only allowed on the second declarator.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ DeclaratorInfo.AddAttributes(AttrList, Loc);
+ }
+
+ if (Tok.isNot(tok::colon))
+ ParseDeclarator(DeclaratorInfo);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ Actions.FinalizeDeclaratorGroup(CurScope, DS, DeclsInGroup.data(),
+ DeclsInGroup.size());
+ return;
+ }
+
+ Diag(Tok, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+}
+
+/// ParseCXXMemberSpecification - Parse the class definition.
+///
+/// member-specification:
+/// member-declaration member-specification[opt]
+/// access-specifier ':' member-specification[opt]
+///
+void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
+ unsigned TagType, DeclPtrTy TagDecl) {
+ assert((TagType == DeclSpec::TST_struct ||
+ TagType == DeclSpec::TST_union ||
+ TagType == DeclSpec::TST_class) && "Invalid TagType!");
+
+ PrettyStackTraceActionsDecl CrashInfo(TagDecl, RecordLoc, Actions,
+ PP.getSourceManager(),
+ "parsing struct/union/class body");
+
+ SourceLocation LBraceLoc = ConsumeBrace();
+
+ // Determine whether this is a top-level (non-nested) class.
+ bool TopLevelClass = ClassStack.empty() ||
+ CurScope->isInCXXInlineMethodScope();
+
+ // Enter a scope for the class.
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+
+ // Note that we are parsing a new (potentially-nested) class definition.
+ ParsingClassDefinition ParsingDef(*this, TagDecl, TopLevelClass);
+
+ if (TagDecl)
+ Actions.ActOnTagStartDefinition(CurScope, TagDecl);
+ else {
+ SkipUntil(tok::r_brace, false, false);
+ return;
+ }
+
+ // C++ 11p3: Members of a class defined with the keyword class are private
+ // by default. Members of a class defined with the keywords struct or union
+ // are public by default.
+ AccessSpecifier CurAS;
+ if (TagType == DeclSpec::TST_class)
+ CurAS = AS_private;
+ else
+ CurAS = AS_public;
+
+ // While we still have something to read, read the member-declarations.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one member-declaration.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi);
+ ConsumeToken();
+ continue;
+ }
+
+ AccessSpecifier AS = getAccessSpecifierIfPresent();
+ if (AS != AS_none) {
+ // Current token is a C++ access specifier.
+ CurAS = AS;
+ ConsumeToken();
+ ExpectAndConsume(tok::colon, diag::err_expected_colon);
+ continue;
+ }
+
+ // Parse all the comma separated declarators.
+ ParseCXXClassMemberDeclaration(CurAS);
+ }
+
+ SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+
+ AttributeList *AttrList = 0;
+ // If attributes exist after class contents, parse them.
+ if (Tok.is(tok::kw___attribute))
+ AttrList = ParseAttributes(); // FIXME: where should I put them?
+
+ Actions.ActOnFinishCXXMemberSpecification(CurScope, RecordLoc, TagDecl,
+ LBraceLoc, RBraceLoc);
+
+ // C++ 9.2p2: Within the class member-specification, the class is regarded as
+ // complete within function bodies, default arguments,
+ // exception-specifications, and constructor ctor-initializers (including
+ // such things in nested classes).
+ //
+ // FIXME: Only function bodies and constructor ctor-initializers are
+ // parsed correctly, fix the rest.
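+  // For example:
+  //   struct S {
+  //     void f() { S s; }   // OK: S is complete within member function bodies
+  //     int x;
+  //   };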
+ if (TopLevelClass) {
+ // We are not inside a nested class. This class and its nested classes
+ // are complete and we can parse the delayed portions of method
+ // declarations and the lexed inline method definitions.
+ ParseLexedMethodDeclarations(getCurrentClass());
+ ParseLexedMethodDefs(getCurrentClass());
+ }
+
+ // Leave the class scope.
+ ParsingDef.Pop();
+ ClassScope.Exit();
+
+ Actions.ActOnTagFinishDefinition(CurScope, TagDecl);
+}
+
+/// ParseConstructorInitializer - Parse a C++ constructor initializer,
+/// which explicitly initializes the members or base classes of a
+/// class (C++ [class.base.init]). For example, the three initializers
+/// after the ':' in the Derived constructor below:
+///
+/// @code
+/// class Base { };
+/// class Derived : Base {
+/// int x;
+/// float f;
+/// public:
+/// Derived(float f) : Base(), x(17), f(f) { }
+/// };
+/// @endcode
+///
+/// [C++] ctor-initializer:
+/// ':' mem-initializer-list
+///
+/// [C++] mem-initializer-list:
+/// mem-initializer
+/// mem-initializer , mem-initializer-list
+void Parser::ParseConstructorInitializer(DeclPtrTy ConstructorDecl) {
+ assert(Tok.is(tok::colon) && "Constructor initializer always starts with ':'");
+
+ SourceLocation ColonLoc = ConsumeToken();
+
+ llvm::SmallVector<MemInitTy*, 4> MemInitializers;
+
+ do {
+ MemInitResult MemInit = ParseMemInitializer(ConstructorDecl);
+ if (!MemInit.isInvalid())
+ MemInitializers.push_back(MemInit.get());
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else if (Tok.is(tok::l_brace))
+ break;
+ else {
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+ SkipUntil(tok::l_brace, true, true);
+ break;
+ }
+ } while (true);
+
+ Actions.ActOnMemInitializers(ConstructorDecl, ColonLoc,
+ MemInitializers.data(), MemInitializers.size());
+}
+
+/// ParseMemInitializer - Parse a C++ member initializer, which is
+/// part of a constructor initializer that explicitly initializes one
+/// member or base class (C++ [class.base.init]). See
+/// ParseConstructorInitializer for an example.
+///
+/// [C++] mem-initializer:
+/// mem-initializer-id '(' expression-list[opt] ')'
+///
+/// [C++] mem-initializer-id:
+/// '::'[opt] nested-name-specifier[opt] class-name
+/// identifier
+Parser::MemInitResult Parser::ParseMemInitializer(DeclPtrTy ConstructorDecl) {
+ // FIXME: parse '::'[opt] nested-name-specifier[opt]
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_member_or_base_name);
+ return true;
+ }
+
+ // Get the identifier. This may be a member name or a class name,
+ // but we'll let the semantic analysis determine which it is.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+
+ // Parse the '('.
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen);
+ return true;
+ }
+ SourceLocation LParenLoc = ConsumeParen();
+
+ // Parse the optional expression-list.
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+ if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ return Actions.ActOnMemInitializer(ConstructorDecl, CurScope, II, IdLoc,
+ LParenLoc, ArgExprs.take(),
+ ArgExprs.size(), CommaLocs.data(),
+ RParenLoc);
+}
+
+/// ParseExceptionSpecification - Parse a C++ exception-specification
+/// (C++ [except.spec]).
+///
+/// exception-specification:
+/// 'throw' '(' type-id-list [opt] ')'
+/// [MS] 'throw' '(' '...' ')'
+///
+/// type-id-list:
+/// type-id
+/// type-id-list ',' type-id
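+///
+/// For example:
+/// @code
+///   void f() throw();                  // throws nothing
+///   void g() throw(int, const char*);  // type-id-list
+///   void h() throw(...);               // [MS] may throw anything
+/// @endcode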
+///
+bool Parser::ParseExceptionSpecification(SourceLocation &EndLoc,
+ llvm::SmallVector<TypeTy*, 2>
+ &Exceptions,
+ llvm::SmallVector<SourceRange, 2>
+ &Ranges,
+ bool &hasAnyExceptionSpec) {
+ assert(Tok.is(tok::kw_throw) && "expected throw");
+
+ SourceLocation ThrowLoc = ConsumeToken();
+
+ if (!Tok.is(tok::l_paren)) {
+ return Diag(Tok, diag::err_expected_lparen_after) << "throw";
+ }
+ SourceLocation LParenLoc = ConsumeParen();
+
+ // Parse throw(...), a Microsoft extension that means "this function
+ // can throw anything".
+ if (Tok.is(tok::ellipsis)) {
+ hasAnyExceptionSpec = true;
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (!getLang().Microsoft)
+ Diag(EllipsisLoc, diag::ext_ellipsis_exception_spec);
+ EndLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ return false;
+ }
+
+ // Parse the sequence of type-ids.
+ SourceRange Range;
+ while (Tok.isNot(tok::r_paren)) {
+ TypeResult Res(ParseTypeName(&Range));
+ if (!Res.isInvalid()) {
+ Exceptions.push_back(Res.get());
+ Ranges.push_back(Range);
+ }
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else
+ break;
+ }
+
+ EndLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ return false;
+}
+
+/// \brief We have just started parsing the definition of a new class,
+/// so push that class onto our stack of classes that are currently
+/// being parsed.
+void Parser::PushParsingClass(DeclPtrTy ClassDecl, bool TopLevelClass) {
+ assert((TopLevelClass || !ClassStack.empty()) &&
+ "Nested class without outer class");
+ ClassStack.push(new ParsingClass(ClassDecl, TopLevelClass));
+}
+
+/// \brief Deallocate the given parsed class and all of its nested
+/// classes.
+void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
+ for (unsigned I = 0, N = Class->NestedClasses.size(); I != N; ++I)
+ DeallocateParsedClasses(Class->NestedClasses[I]);
+ delete Class;
+}
+
+/// \brief Pop the top class of the stack of classes that are
+/// currently being parsed.
+///
+/// This routine should be called when we have finished parsing the
+/// definition of a class, but have not yet popped the Scope
+/// associated with the class's definition.
+void Parser::PopParsingClass() {
+ assert(!ClassStack.empty() && "Mismatched push/pop for class parsing");
+
+ ParsingClass *Victim = ClassStack.top();
+ ClassStack.pop();
+ if (Victim->TopLevelClass) {
+ // Deallocate all of the nested classes of this class,
+ // recursively: we don't need to keep any of this information.
+ DeallocateParsedClasses(Victim);
+ return;
+ }
+ assert(!ClassStack.empty() && "Missing top-level class?");
+
+ if (Victim->MethodDecls.empty() && Victim->MethodDefs.empty() &&
+ Victim->NestedClasses.empty()) {
+ // The victim is a nested class, but we will not need to perform
+ // any processing after the definition of this class since it has
+ // no members whose handling was delayed. Therefore, we can just
+ // remove this nested class.
+ delete Victim;
+ return;
+ }
+
+ // This nested class has some members that will need to be processed
+ // after the top-level class is completely defined. Therefore, add
+ // it to the list of nested classes within its parent.
+ assert(CurScope->isClassScope() && "Nested class outside of class scope?");
+ ClassStack.top()->NestedClasses.push_back(Victim);
+ Victim->TemplateScope = CurScope->getParent()->isTemplateParamScope();
+}
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
new file mode 100644
index 0000000..cd62c64
--- /dev/null
+++ b/lib/Parse/ParseExpr.cpp
@@ -0,0 +1,1514 @@
+//===--- ParseExpr.cpp - Expression Parsing -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expression parsing implementation. Expressions in
+// C99 basically consist of a bunch of binary operators with unary operators and
+// other random stuff at the leaves.
+//
+// In the C99 grammar, these unary operators bind tightest and are represented
+// as the 'cast-expression' production. Everything else is either a binary
+// operator (e.g. '/') or a ternary operator ("?:"). The unary leaves are
+// handled by ParseCastExpression, the higher level pieces are handled by
+// ParseBinaryExpression.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "ExtensionRAIIObject.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+/// PrecedenceLevels - These are precedences for the binary/ternary operators in
+/// the C99 grammar. These have been named to relate with the C99 grammar
+/// productions.  Low precedence numbers bind more weakly than high numbers.
+namespace prec {
+ enum Level {
+ Unknown = 0, // Not binary operator.
+ Comma = 1, // ,
+ Assignment = 2, // =, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=
+ Conditional = 3, // ?
+ LogicalOr = 4, // ||
+ LogicalAnd = 5, // &&
+ InclusiveOr = 6, // |
+ ExclusiveOr = 7, // ^
+ And = 8, // &
+ Equality = 9, // ==, !=
+ Relational = 10, // >=, <=, >, <
+ Shift = 11, // <<, >>
+ Additive = 12, // -, +
+ Multiplicative = 13, // *, /, %
+ PointerToMember = 14 // .*, ->*
+ };
+}
+
+
+/// getBinOpPrecedence - Return the precedence of the specified binary operator
+/// token.  This returns prec::Unknown if the token is not a binary operator.
+///
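+/// For example, given these precedences the expression 'a = b || c & d'
+/// groups as 'a = (b || (c & d))', since '&' (prec::And) binds tighter than
+/// '||' (prec::LogicalOr), which binds tighter than '=' (prec::Assignment).
+///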
+static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
+ bool GreaterThanIsOperator,
+ bool CPlusPlus0x) {
+ switch (Kind) {
+ case tok::greater:
+ // C++ [temp.names]p3:
+ // [...] When parsing a template-argument-list, the first
+ // non-nested > is taken as the ending delimiter rather than a
+ // greater-than operator. [...]
+ if (GreaterThanIsOperator)
+ return prec::Relational;
+ return prec::Unknown;
+
+ case tok::greatergreater:
+ // C++0x [temp.names]p3:
+ //
+ // [...] Similarly, the first non-nested >> is treated as two
+ // consecutive but distinct > tokens, the first of which is
+ // taken as the end of the template-argument-list and completes
+ // the template-id. [...]
+ if (GreaterThanIsOperator || !CPlusPlus0x)
+ return prec::Shift;
+ return prec::Unknown;
+
+ default: return prec::Unknown;
+ case tok::comma: return prec::Comma;
+ case tok::equal:
+ case tok::starequal:
+ case tok::slashequal:
+ case tok::percentequal:
+ case tok::plusequal:
+ case tok::minusequal:
+ case tok::lesslessequal:
+ case tok::greatergreaterequal:
+ case tok::ampequal:
+ case tok::caretequal:
+ case tok::pipeequal: return prec::Assignment;
+ case tok::question: return prec::Conditional;
+ case tok::pipepipe: return prec::LogicalOr;
+ case tok::ampamp: return prec::LogicalAnd;
+ case tok::pipe: return prec::InclusiveOr;
+ case tok::caret: return prec::ExclusiveOr;
+ case tok::amp: return prec::And;
+ case tok::exclaimequal:
+ case tok::equalequal: return prec::Equality;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal: return prec::Relational;
+ case tok::lessless: return prec::Shift;
+ case tok::plus:
+ case tok::minus: return prec::Additive;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return prec::Multiplicative;
+ case tok::periodstar:
+ case tok::arrowstar: return prec::PointerToMember;
+ }
+}
+
+
+/// ParseExpression - Simple precedence-based parser for binary/ternary
+/// operators.
+///
+/// Note: we diverge from the C99 grammar when parsing the assignment-expression
+/// production. C99 specifies that the LHS of an assignment operator should be
+/// parsed as a unary-expression, but consistency dictates that it be a
+/// conditional-expression. In practice, the important thing here is that the
+/// LHS of an assignment has to be an l-value, which productions between
+/// unary-expression and conditional-expression don't produce. Because we want
+/// consistency, we parse the LHS as a conditional-expression, then check for
+/// l-value-ness in semantic analysis stages.
+///
+/// pm-expression: [C++ 5.5]
+/// cast-expression
+/// pm-expression '.*' cast-expression
+/// pm-expression '->*' cast-expression
+///
+/// multiplicative-expression: [C99 6.5.5]
+/// Note: in C++, apply pm-expression instead of cast-expression
+/// cast-expression
+/// multiplicative-expression '*' cast-expression
+/// multiplicative-expression '/' cast-expression
+/// multiplicative-expression '%' cast-expression
+///
+/// additive-expression: [C99 6.5.6]
+/// multiplicative-expression
+/// additive-expression '+' multiplicative-expression
+/// additive-expression '-' multiplicative-expression
+///
+/// shift-expression: [C99 6.5.7]
+/// additive-expression
+/// shift-expression '<<' additive-expression
+/// shift-expression '>>' additive-expression
+///
+/// relational-expression: [C99 6.5.8]
+/// shift-expression
+/// relational-expression '<' shift-expression
+/// relational-expression '>' shift-expression
+/// relational-expression '<=' shift-expression
+/// relational-expression '>=' shift-expression
+///
+/// equality-expression: [C99 6.5.9]
+/// relational-expression
+/// equality-expression '==' relational-expression
+/// equality-expression '!=' relational-expression
+///
+/// AND-expression: [C99 6.5.10]
+/// equality-expression
+/// AND-expression '&' equality-expression
+///
+/// exclusive-OR-expression: [C99 6.5.11]
+/// AND-expression
+/// exclusive-OR-expression '^' AND-expression
+///
+/// inclusive-OR-expression: [C99 6.5.12]
+/// exclusive-OR-expression
+/// inclusive-OR-expression '|' exclusive-OR-expression
+///
+/// logical-AND-expression: [C99 6.5.13]
+/// inclusive-OR-expression
+/// logical-AND-expression '&&' inclusive-OR-expression
+///
+/// logical-OR-expression: [C99 6.5.14]
+/// logical-AND-expression
+/// logical-OR-expression '||' logical-AND-expression
+///
+/// conditional-expression: [C99 6.5.15]
+/// logical-OR-expression
+/// logical-OR-expression '?' expression ':' conditional-expression
+/// [GNU] logical-OR-expression '?' ':' conditional-expression
+/// [C++] the third operand is an assignment-expression
+///
+/// assignment-expression: [C99 6.5.16]
+/// conditional-expression
+/// unary-expression assignment-operator assignment-expression
+/// [C++] throw-expression [C++ 15]
+///
+/// assignment-operator: one of
+/// = *= /= %= += -= <<= >>= &= ^= |=
+///
+/// expression: [C99 6.5.17]
+/// assignment-expression
+/// expression ',' assignment-expression
+///
+Parser::OwningExprResult Parser::ParseExpression() {
+ OwningExprResult LHS(ParseAssignmentExpression());
+ if (LHS.isInvalid()) return move(LHS);
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
+
+/// This routine is called when the '@' is seen and consumed.
+/// Current token is an Identifier and is not a 'try'. This
+/// routine is necessary to disambiguate @try-statement from,
+/// for example, @encode-expression.
+///
+Parser::OwningExprResult
+Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
+ OwningExprResult LHS(ParseObjCAtExpression(AtLoc));
+ if (LHS.isInvalid()) return move(LHS);
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
+
+/// This routine is called when a leading '__extension__' is seen and
+/// consumed. This is necessary because the token gets consumed in the
+/// process of disambiguating between an expression and a declaration.
+Parser::OwningExprResult
+Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
+ OwningExprResult LHS(Actions, true);
+ {
+ // Silence extension warnings in the sub-expression
+ ExtensionRAIIObject O(Diags);
+
+ LHS = ParseCastExpression(false);
+ if (LHS.isInvalid()) return move(LHS);
+ }
+
+ LHS = Actions.ActOnUnaryOp(CurScope, ExtLoc, tok::kw___extension__,
+ move(LHS));
+ if (LHS.isInvalid()) return move(LHS);
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
+
+/// ParseAssignmentExpression - Parse an expr that doesn't include commas.
+///
+Parser::OwningExprResult Parser::ParseAssignmentExpression() {
+ if (Tok.is(tok::kw_throw))
+ return ParseThrowExpression();
+
+ OwningExprResult LHS(ParseCastExpression(false));
+ if (LHS.isInvalid()) return move(LHS);
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
+}
+
+/// ParseAssignmentExprWithObjCMessageExprStart - Parse an assignment expression
+/// where part of an objc message send has already been parsed. In this case
+/// LBracLoc indicates the location of the '[' of the message send, and either
+/// ReceiverName or ReceiverExpr is non-null indicating the receiver of the
+/// message.
+///
+/// Since this handles full assignment-expressions, it handles postfix
+/// expressions and other binary operators for these expressions as well.
+Parser::OwningExprResult
+Parser::ParseAssignmentExprWithObjCMessageExprStart(SourceLocation LBracLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *ReceiverName,
+ ExprArg ReceiverExpr) {
+ OwningExprResult R(ParseObjCMessageExpressionBody(LBracLoc, NameLoc,
+ ReceiverName,
+ move(ReceiverExpr)));
+ if (R.isInvalid()) return move(R);
+ R = ParsePostfixExpressionSuffix(move(R));
+ if (R.isInvalid()) return move(R);
+ return ParseRHSOfBinaryExpression(move(R), prec::Assignment);
+}
+
+
+Parser::OwningExprResult Parser::ParseConstantExpression() {
+ OwningExprResult LHS(ParseCastExpression(false));
+ if (LHS.isInvalid()) return move(LHS);
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Conditional);
+}
+
+/// ParseRHSOfBinaryExpression - Parse a binary expression that starts with
+/// LHS and has a precedence of at least MinPrec.
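+///
+/// For example, with LHS already parsed as 'a' and MinPrec == prec::Assignment,
+/// the remaining input '= b + c * d' is consumed and combined into the AST for
+/// 'a = (b + (c * d))' by comparing each operator's precedence against MinPrec
+/// and recursing for operators that bind more tightly.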
+Parser::OwningExprResult
+Parser::ParseRHSOfBinaryExpression(OwningExprResult LHS, unsigned MinPrec) {
+ unsigned NextTokPrec = getBinOpPrecedence(Tok.getKind(),
+ GreaterThanIsOperator,
+ getLang().CPlusPlus0x);
+ SourceLocation ColonLoc;
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse (e.g.
+ // because we are called recursively, or because the token is not a binop),
+ // then we are done!
+ if (NextTokPrec < MinPrec)
+ return move(LHS);
+
+ // Consume the operator, saving the operator token for error reporting.
+ Token OpToken = Tok;
+ ConsumeToken();
+
+ // Special case handling for the ternary operator.
+ OwningExprResult TernaryMiddle(Actions, true);
+ if (NextTokPrec == prec::Conditional) {
+ if (Tok.isNot(tok::colon)) {
+ // Handle this production specially:
+ // logical-OR-expression '?' expression ':' conditional-expression
+ // In particular, the RHS of the '?' is 'expression', not
+ // 'logical-OR-expression' as we might expect.
+ TernaryMiddle = ParseExpression();
+ if (TernaryMiddle.isInvalid())
+ return move(TernaryMiddle);
+ } else {
+ // Special case handling of "X ? Y : Z" where Y is empty:
+ // logical-OR-expression '?' ':' conditional-expression [GNU]
+ TernaryMiddle = 0;
+ Diag(Tok, diag::ext_gnu_conditional_expr);
+ }
+
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon);
+ Diag(OpToken, diag::note_matching) << "?";
+ return ExprError();
+ }
+
+ // Eat the colon.
+ ColonLoc = ConsumeToken();
+ }
+
+ // Parse another leaf here for the RHS of the operator.
+ // ParseCastExpression works here because all RHS expressions in C have it
+ // as a prefix, at least. However, in C++, an assignment-expression could
+ // be a throw-expression, which is not a valid cast-expression.
+ // Therefore we need some special-casing here.
+ // Also note that the third operand of the conditional operator is
+ // an assignment-expression in C++.
+ OwningExprResult RHS(Actions);
+ if (getLang().CPlusPlus && NextTokPrec <= prec::Conditional)
+ RHS = ParseAssignmentExpression();
+ else
+ RHS = ParseCastExpression(false);
+ if (RHS.isInvalid())
+ return move(RHS);
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ unsigned ThisPrec = NextTokPrec;
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLang().CPlusPlus0x);
+
+ // Assignment and conditional expressions are right-associative.
+ bool isRightAssoc = ThisPrec == prec::Conditional ||
+ ThisPrec == prec::Assignment;
+
+ // Get the precedence of the operator to the right of the RHS. If it binds
+ // more tightly with RHS than we do, evaluate it completely first.
+ if (ThisPrec < NextTokPrec ||
+ (ThisPrec == NextTokPrec && isRightAssoc)) {
+ // If this is left-associative, only parse things on the RHS that bind
+      // more tightly than the current operator.  If it is right-associative, it
+      // is okay to bind exactly as tightly.  For example, compile A=B=C=D as
+ // A=(B=(C=D)), where each paren is a level of recursion here.
+ // The function takes ownership of the RHS.
+ RHS = ParseRHSOfBinaryExpression(move(RHS), ThisPrec + !isRightAssoc);
+ if (RHS.isInvalid())
+ return move(RHS);
+
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLang().CPlusPlus0x);
+ }
+ assert(NextTokPrec <= ThisPrec && "Recursion didn't work!");
+
+ if (!LHS.isInvalid()) {
+ // Combine the LHS and RHS into the LHS (e.g. build AST).
+ if (TernaryMiddle.isInvalid()) {
+ // If we're using '>>' as an operator within a template
+ // argument list (in C++98), suggest the addition of
+ // parentheses so that the code remains well-formed in C++0x.
+ if (!GreaterThanIsOperator && OpToken.is(tok::greatergreater))
+ SuggestParentheses(OpToken.getLocation(),
+ diag::warn_cxx0x_right_shift_in_template_arg,
+ SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
+ Actions.getExprRange(RHS.get()).getEnd()));
+
+ LHS = Actions.ActOnBinOp(CurScope, OpToken.getLocation(),
+ OpToken.getKind(), move(LHS), move(RHS));
+ } else
+ LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
+ move(LHS), move(TernaryMiddle),
+ move(RHS));
+ }
+ }
+}
+
+/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
+/// true, parse a unary-expression. isAddressOfOperand exists because an
+/// id-expression that is the operand of address-of gets special treatment
+/// due to member pointers.
+///
+Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand) {
+ bool NotCastExpr;
+ OwningExprResult Res = ParseCastExpression(isUnaryExpression,
+ isAddressOfOperand,
+ NotCastExpr);
+ if (NotCastExpr)
+ Diag(Tok, diag::err_expected_expression);
+ return move(Res);
+}
+
+/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
+/// true, parse a unary-expression. isAddressOfOperand exists because an
+/// id-expression that is the operand of address-of gets special treatment
+/// due to member pointers. NotCastExpr is set to true if the token is not the
+/// start of a cast-expression, and no diagnostic is emitted in this case.
+///
+/// cast-expression: [C99 6.5.4]
+/// unary-expression
+/// '(' type-name ')' cast-expression
+///
+/// unary-expression: [C99 6.5.3]
+/// postfix-expression
+/// '++' unary-expression
+/// '--' unary-expression
+/// unary-operator cast-expression
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
+/// [GNU] '&&' identifier
+/// [C++] new-expression
+/// [C++] delete-expression
+///
+/// unary-operator: one of
+/// '&' '*' '+' '-' '~' '!'
+/// [GNU] '__extension__' '__real' '__imag'
+///
+/// primary-expression: [C99 6.5.1]
+/// [C99] identifier
+/// [C++] id-expression
+/// constant
+/// string-literal
+/// [C++] boolean-literal [C++ 2.13.5]
+/// [C++0x] 'nullptr' [C++0x 2.14.7]
+/// '(' expression ')'
+/// '__func__' [C99 6.4.2.2]
+/// [GNU] '__FUNCTION__'
+/// [GNU] '__PRETTY_FUNCTION__'
+/// [GNU] '(' compound-statement ')'
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [GNU] '__null'
+/// [OBJC] '[' objc-message-expr ']'
+/// [OBJC] '@selector' '(' objc-selector-arg ')'
+/// [OBJC] '@protocol' '(' identifier ')'
+/// [OBJC] '@encode' '(' type-name ')'
+/// [OBJC] objc-string-literal
+/// [C++] simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// [C++] typename-specifier '(' expression-list[opt] ')' [TODO]
+/// [C++] 'const_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'dynamic_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'reinterpret_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'static_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' type-id ')' [C++ 5.2p1]
+/// [C++] 'this' [C++ 9.3.2]
+/// [G++] unary-type-trait '(' type-id ')'
+/// [G++] binary-type-trait '(' type-id ',' type-id ')' [TODO]
+/// [clang] '^' block-literal
+///
+/// constant: [C99 6.4.4]
+/// integer-constant
+/// floating-constant
+/// enumeration-constant -> identifier
+/// character-constant
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id [TODO]
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id [TODO]
+/// '~' class-name [TODO]
+/// template-id [TODO]
+///
+/// new-expression: [C++ 5.3.4]
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// delete-expression: [C++ 5.3.5]
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
+///
+/// [GNU] unary-type-trait:
+/// '__has_nothrow_assign' [TODO]
+/// '__has_nothrow_copy' [TODO]
+/// '__has_nothrow_constructor' [TODO]
+/// '__has_trivial_assign' [TODO]
+/// '__has_trivial_copy' [TODO]
+/// '__has_trivial_constructor'
+/// '__has_trivial_destructor'
+/// '__has_virtual_destructor' [TODO]
+/// '__is_abstract' [TODO]
+/// '__is_class'
+/// '__is_empty' [TODO]
+/// '__is_enum'
+/// '__is_pod'
+/// '__is_polymorphic'
+/// '__is_union'
+///
+/// [GNU] binary-type-trait:
+/// '__is_base_of' [TODO]
+///
+Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ bool &NotCastExpr) {
+ OwningExprResult Res(Actions);
+ tok::TokenKind SavedKind = Tok.getKind();
+ NotCastExpr = false;
+
+ // This handles all of cast-expression, unary-expression, postfix-expression,
+ // and primary-expression. We handle them together like this for efficiency
+  // and to simplify handling of an expression starting with a '(' token, which
+ // may be one of a parenthesized expression, cast-expression, compound literal
+ // expression, or statement expression.
+ //
+ // If the parsed tokens consist of a primary-expression, the cases below
+ // call ParsePostfixExpressionSuffix to handle the postfix expression
+ // suffixes. Cases that cannot be followed by postfix exprs should
+ // return without invoking ParsePostfixExpressionSuffix.
+ switch (SavedKind) {
+ case tok::l_paren: {
+    // If this expression is limited to being a unary-expression, the paren
+    // cannot start a cast expression.
+ ParenParseOption ParenExprType =
+ isUnaryExpression ? CompoundLiteral : CastExpr;
+ TypeTy *CastTy;
+ SourceLocation LParenLoc = Tok.getLocation();
+ SourceLocation RParenLoc;
+    Res = ParseParenExpression(ParenExprType, false/*stopIfCastExpr*/,
+ CastTy, RParenLoc);
+ if (Res.isInvalid()) return move(Res);
+
+ switch (ParenExprType) {
+ case SimpleExpr: break; // Nothing else to do.
+ case CompoundStmt: break; // Nothing else to do.
+ case CompoundLiteral:
+ // We parsed '(' type-name ')' '{' ... '}'. If any suffixes of
+ // postfix-expression exist, parse them now.
+ break;
+ case CastExpr:
+ // We have parsed the cast-expression and no postfix-expr pieces are
+ // following.
+ return move(Res);
+ }
+
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ }
+
+ // primary-expression
+ case tok::numeric_constant:
+ // constant: integer-constant
+ // constant: floating-constant
+
+ Res = Actions.ActOnNumericConstant(Tok);
+ ConsumeToken();
+
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+
+ case tok::kw_true:
+ case tok::kw_false:
+ return ParseCXXBoolLiteral();
+
+ case tok::kw_nullptr:
+ return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+
+ case tok::identifier: { // primary-expression: identifier
+ // unqualified-id: identifier
+ // constant: enumeration-constant
+    // Turn a potentially qualified name into an annot_typename or
+ // annot_cxxscope if it would be valid. This handles things like x::y, etc.
+ if (getLang().CPlusPlus) {
+ // If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+ }
+
+ // Support 'Class.property' notation.
+ // We don't use isTokObjCMessageIdentifierReceiver(), since it allows
+ // 'super' (which is inappropriate here).
+ if (getLang().ObjC1 &&
+ Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), CurScope) &&
+ NextToken().is(tok::period)) {
+ IdentifierInfo &ReceiverName = *Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+ SourceLocation DotLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return ExprError();
+ }
+ IdentifierInfo &PropertyName = *Tok.getIdentifierInfo();
+ SourceLocation PropertyLoc = ConsumeToken();
+
+ Res = Actions.ActOnClassPropertyRefExpr(ReceiverName, PropertyName,
+ IdentLoc, PropertyLoc);
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ }
+ // Consume the identifier so that we can see if it is followed by a '('.
+    // Function designators are allowed to be undeclared (C99 6.5.1p2), so we
+    // need to know whether this identifier is a function designator or not.
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+ SourceLocation L = ConsumeToken();
+ Res = Actions.ActOnIdentifierExpr(CurScope, L, II, Tok.is(tok::l_paren));
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ }
+ case tok::char_constant: // constant: character-constant
+ Res = Actions.ActOnCharacterConstant(Tok);
+ ConsumeToken();
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ case tok::kw___func__: // primary-expression: __func__ [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: // primary-expression: __FUNCTION__ [GNU]
+ case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
+ Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
+ ConsumeToken();
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ Res = ParseStringLiteralExpression();
+ if (Res.isInvalid()) return move(Res);
+ // This can be followed by postfix-expr pieces (e.g. "foo"[1]).
+ return ParsePostfixExpressionSuffix(move(Res));
+ case tok::kw___builtin_va_arg:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_types_compatible_p:
+ return ParseBuiltinPrimaryExpression();
+ case tok::kw___null:
+ return Actions.ActOnGNUNullExpr(ConsumeToken());
+ case tok::plusplus: // unary-expression: '++' unary-expression
+ case tok::minusminus: { // unary-expression: '--' unary-expression
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(true);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ return move(Res);
+ }
+ case tok::amp: { // unary-expression: '&' cast-expression
+ // Special treatment because of member pointers
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false, true);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ return move(Res);
+ }
+
+ case tok::star: // unary-expression: '*' cast-expression
+ case tok::plus: // unary-expression: '+' cast-expression
+ case tok::minus: // unary-expression: '-' cast-expression
+ case tok::tilde: // unary-expression: '~' cast-expression
+ case tok::exclaim: // unary-expression: '!' cast-expression
+ case tok::kw___real: // unary-expression: '__real' cast-expression [GNU]
+ case tok::kw___imag: { // unary-expression: '__imag' cast-expression [GNU]
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ return move(Res);
+ }
+
+ case tok::kw___extension__:{//unary-expression:'__extension__' cast-expr [GNU]
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res));
+ return move(Res);
+ }
+ case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
+ // unary-expression: 'sizeof' '(' type-name ')'
+ case tok::kw_alignof:
+ case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
+ // unary-expression: '__alignof' '(' type-name ')'
+ // unary-expression: 'alignof' '(' type-id ')'
+ return ParseSizeofAlignofExpression();
+ case tok::ampamp: { // unary-expression: '&&' identifier
+ SourceLocation AmpAmpLoc = ConsumeToken();
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ Diag(AmpAmpLoc, diag::ext_gnu_address_of_label);
+ Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(),
+ Tok.getIdentifierInfo());
+ ConsumeToken();
+ return move(Res);
+ }
+ case tok::kw_const_cast:
+ case tok::kw_dynamic_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ Res = ParseCXXCasts();
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ case tok::kw_typeid:
+ Res = ParseCXXTypeid();
+ // This can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ case tok::kw_this:
+ Res = ParseCXXThis();
+ // This can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::annot_typename: {
+ if (!getLang().CPlusPlus) {
+ Diag(Tok, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ // postfix-expression: simple-type-specifier '(' expression-list[opt] ')'
+ //
+ DeclSpec DS;
+ ParseCXXSimpleTypeSpecifier(DS);
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after_type)
+ << DS.getSourceRange());
+
+ Res = ParseCXXTypeConstructExpression(DS);
+ // This can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(move(Res));
+ }
+
+ case tok::annot_cxxscope: // [C++] id-expression: qualified-id
+ case tok::kw_operator: // [C++] id-expression: operator/conversion-function-id
+ // template-id
+ Res = ParseCXXIdExpression(isAddressOfOperand);
+ return ParsePostfixExpressionSuffix(move(Res));
+
+ case tok::coloncolon: {
+ // ::foo::bar -> global qualified name etc. If TryAnnotateTypeOrScopeToken
+ // annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+
+ // ::new -> [C++] new-expression
+ // ::delete -> [C++] delete-expression
+ SourceLocation CCLoc = ConsumeToken();
+ if (Tok.is(tok::kw_new))
+ return ParseCXXNewExpression(true, CCLoc);
+ if (Tok.is(tok::kw_delete))
+ return ParseCXXDeleteExpression(true, CCLoc);
+
+ // This is not a type name or scope specifier, it is an invalid expression.
+ Diag(CCLoc, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ case tok::kw_new: // [C++] new-expression
+ return ParseCXXNewExpression(false, Tok.getLocation());
+
+ case tok::kw_delete: // [C++] delete-expression
+ return ParseCXXDeleteExpression(false, Tok.getLocation());
+
+ case tok::kw___is_pod: // [GNU] unary-type-trait
+ case tok::kw___is_class:
+ case tok::kw___is_enum:
+ case tok::kw___is_union:
+ case tok::kw___is_polymorphic:
+ case tok::kw___is_abstract:
+ case tok::kw___has_trivial_constructor:
+ case tok::kw___has_trivial_destructor:
+ return ParseUnaryTypeTrait();
+
+ case tok::at: {
+ SourceLocation AtLoc = ConsumeToken();
+ return ParseObjCAtExpression(AtLoc);
+ }
+ case tok::caret:
+ return ParsePostfixExpressionSuffix(ParseBlockLiteralExpression());
+ case tok::l_square:
+ // These can be followed by postfix-expr pieces.
+ if (getLang().ObjC1)
+ return ParsePostfixExpressionSuffix(ParseObjCMessageExpression());
+ // FALL THROUGH.
+ default:
+ NotCastExpr = true;
+ return ExprError();
+ }
+
+ // unreachable.
+ abort();
+}
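As a hedged sketch of what the big switch above dispatches on, the following C++ input (invented names, nothing from the patch) touches several of the cases: plain unary operators, prefix '++', the sizeof hand-off, and the address-of-a-qualified-id situation that isAddressOfOperand exists for.

struct S { int m; };
int f(int i, S s) {
  int *p = &i;                   // tok::amp: '&' cast-expression
  int S::*pm = &S::m;            // '&' on a qualified-id: the member-pointer case
  ++i;                           // tok::plusplus: '++' unary-expression
  int n = -~!i;                  // a chain of ordinary unary operators
  unsigned long sz = sizeof i;   // handed off to ParseSizeofAlignofExpression
  (void)s;
  return n + (int)sz + (p != 0) + (pm != 0);
}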
+
+/// ParsePostfixExpressionSuffix - Once the leading part of a postfix-expression
+/// is parsed, this method parses any suffixes that apply.
+///
+/// postfix-expression: [C99 6.5.2]
+/// primary-expression
+/// postfix-expression '[' expression ']'
+/// postfix-expression '(' argument-expression-list[opt] ')'
+/// postfix-expression '.' identifier
+/// postfix-expression '->' identifier
+/// postfix-expression '++'
+/// postfix-expression '--'
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+///
+/// argument-expression-list: [C99 6.5.2]
+/// argument-expression
+/// argument-expression-list ',' assignment-expression
+///
+Parser::OwningExprResult
+Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
+ // Now that the primary-expression piece of the postfix-expression has been
+ // parsed, see if there are any postfix-expression pieces here.
+ SourceLocation Loc;
+ while (1) {
+ switch (Tok.getKind()) {
+ default: // Not a postfix-expression suffix.
+ return move(LHS);
+ case tok::l_square: { // postfix-expression: p-e '[' expression ']'
+ Loc = ConsumeBracket();
+ OwningExprResult Idx(ParseExpression());
+
+ SourceLocation RLoc = Tok.getLocation();
+
+ if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) {
+ LHS = Actions.ActOnArraySubscriptExpr(CurScope, move(LHS), Loc,
+ move(Idx), RLoc);
+ } else
+ LHS = ExprError();
+
+ // Match the ']'.
+ MatchRHSPunctuation(tok::r_square, Loc);
+ break;
+ }
+
+ case tok::l_paren: { // p-e: p-e '(' argument-expression-list[opt] ')'
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ Loc = ConsumeParen();
+
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(ArgExprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ }
+
+ // Match the ')'.
+ if (Tok.isNot(tok::r_paren)) {
+ MatchRHSPunctuation(tok::r_paren, Loc);
+ return ExprError();
+ }
+
+ if (!LHS.isInvalid()) {
+ assert((ArgExprs.size() == 0 || ArgExprs.size()-1 == CommaLocs.size())&&
+ "Unexpected number of commas!");
+ LHS = Actions.ActOnCallExpr(CurScope, move(LHS), Loc,
+ move_arg(ArgExprs), CommaLocs.data(),
+ Tok.getLocation());
+ }
+
+ ConsumeParen();
+ break;
+ }
+ case tok::arrow: // postfix-expression: p-e '->' identifier
+ case tok::period: { // postfix-expression: p-e '.' identifier
+ tok::TokenKind OpKind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken(); // Eat the "." or "->" token.
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return ExprError();
+ }
+
+ if (!LHS.isInvalid()) {
+ LHS = Actions.ActOnMemberReferenceExpr(CurScope, move(LHS), OpLoc,
+ OpKind, Tok.getLocation(),
+ *Tok.getIdentifierInfo(),
+ ObjCImpDecl);
+ }
+ ConsumeToken();
+ break;
+ }
+ case tok::plusplus: // postfix-expression: postfix-expression '++'
+ case tok::minusminus: // postfix-expression: postfix-expression '--'
+ if (!LHS.isInvalid()) {
+ LHS = Actions.ActOnPostfixUnaryOp(CurScope, Tok.getLocation(),
+ Tok.getKind(), move(LHS));
+ }
+ ConsumeToken();
+ break;
+ }
+ }
+}
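The loop above peels suffixes off left to right, so a chain like the one below (illustrative input only) is consumed one piece at a time: subscript, call, member access, then postfix increment.

struct Node { Node *next; int val; int get() { return val; } };
int chain(Node *n, int (*tbl[4])(int)) {
  int x = tbl[2](n->val);                 // p-e '[' expression ']', then '(' args ')'
  return n->next->get() + x + n->val++;   // '->' member references and postfix '++'
}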
+
+/// ParseExprAfterTypeofSizeofAlignof - We parsed a typeof/sizeof/alignof and
+/// we are at the start of an expression or a parenthesized type-id.
+/// OpTok is the operand token (typeof/sizeof/alignof). Returns the expression
+/// (isCastExpr == false) or the type (isCastExpr == true).
+///
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
+///
+/// [GNU] typeof-specifier:
+/// typeof ( expressions )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
+///
+Parser::OwningExprResult
+Parser::ParseExprAfterTypeofSizeofAlignof(const Token &OpTok,
+ bool &isCastExpr,
+ TypeTy *&CastTy,
+ SourceRange &CastRange) {
+
+ assert((OpTok.is(tok::kw_typeof) || OpTok.is(tok::kw_sizeof) ||
+ OpTok.is(tok::kw___alignof) || OpTok.is(tok::kw_alignof)) &&
+ "Not a typeof/sizeof/alignof expression!");
+
+ OwningExprResult Operand(Actions);
+
+ // If the operand doesn't start with an '(', it must be an expression.
+ if (Tok.isNot(tok::l_paren)) {
+ isCastExpr = false;
+ if (OpTok.is(tok::kw_typeof) && !getLang().CPlusPlus) {
+ Diag(Tok,diag::err_expected_lparen_after_id) << OpTok.getIdentifierInfo();
+ return ExprError();
+ }
+ Operand = ParseCastExpression(true/*isUnaryExpression*/);
+
+ } else {
+ // If it starts with a '(', we know that it is either a parenthesized
+ // type-name, or it is a unary-expression that starts with a compound
+ // literal, or starts with a primary-expression that is a parenthesized
+ // expression.
+ ParenParseOption ExprType = CastExpr;
+ SourceLocation LParenLoc = Tok.getLocation(), RParenLoc;
+ Operand = ParseParenExpression(ExprType, true/*stopIfCastExpr*/,
+ CastTy, RParenLoc);
+ CastRange = SourceRange(LParenLoc, RParenLoc);
+
+ // If ParseParenExpression parsed a '(typename)' sequence only, then this is
+ // a type.
+ if (ExprType == CastExpr) {
+ isCastExpr = true;
+ return ExprEmpty();
+ }
+
+ // If this is a parenthesized expression, it is the start of a
+ // unary-expression, but doesn't include any postfix pieces. Parse these
+ // now if present.
+ Operand = ParsePostfixExpressionSuffix(move(Operand));
+ }
+
+  // If we get here, the operand to the typeof/sizeof/alignof was an expression.
+ isCastExpr = false;
+ return move(Operand);
+}
+
+
+/// ParseSizeofAlignofExpression - Parse a sizeof or alignof expression.
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
+Parser::OwningExprResult Parser::ParseSizeofAlignofExpression() {
+ assert((Tok.is(tok::kw_sizeof) || Tok.is(tok::kw___alignof)
+ || Tok.is(tok::kw_alignof)) &&
+ "Not a sizeof/alignof expression!");
+ Token OpTok = Tok;
+ ConsumeToken();
+
+ bool isCastExpr;
+ TypeTy *CastTy;
+ SourceRange CastRange;
+ OwningExprResult Operand = ParseExprAfterTypeofSizeofAlignof(OpTok,
+ isCastExpr,
+ CastTy,
+ CastRange);
+
+ if (isCastExpr)
+ return Actions.ActOnSizeOfAlignOfExpr(OpTok.getLocation(),
+ OpTok.is(tok::kw_sizeof),
+ /*isType=*/true, CastTy,
+ CastRange);
+
+  // If we get here, the operand to the sizeof/alignof was an expression.
+ if (!Operand.isInvalid())
+ Operand = Actions.ActOnSizeOfAlignOfExpr(OpTok.getLocation(),
+ OpTok.is(tok::kw_sizeof),
+ /*isType=*/false,
+ Operand.release(), CastRange);
+ return move(Operand);
+}
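For reference, the spellings this routine accepts look like the following as ordinary input; '__alignof' is the GNU spelling listed in the grammar comment, and the actual values are whatever the target reports.

int sizes(int *p) {
  unsigned long a = sizeof *p;        // 'sizeof' unary-expression
  unsigned long b = sizeof(double);   // 'sizeof' '(' type-name ')'
  unsigned long c = __alignof(long);  // [GNU] '__alignof' '(' type-name ')'
  return (int)(a + b + c);
}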
+
+/// ParseBuiltinPrimaryExpression
+///
+/// primary-expression: [C99 6.5.1]
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+///
+/// [GNU] offsetof-member-designator:
+/// [GNU] identifier
+/// [GNU] offsetof-member-designator '.' identifier
+/// [GNU] offsetof-member-designator '[' expression ']'
+///
+Parser::OwningExprResult Parser::ParseBuiltinPrimaryExpression() {
+ OwningExprResult Res(Actions);
+ const IdentifierInfo *BuiltinII = Tok.getIdentifierInfo();
+
+ tok::TokenKind T = Tok.getKind();
+ SourceLocation StartLoc = ConsumeToken(); // Eat the builtin identifier.
+
+ // All of these start with an open paren.
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after_id)
+ << BuiltinII);
+
+ SourceLocation LParenLoc = ConsumeParen();
+ // TODO: Build AST.
+
+ switch (T) {
+ default: assert(0 && "Not a builtin primary expression!");
+ case tok::kw___builtin_va_arg: {
+ OwningExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ TypeResult Ty = ParseTypeName();
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ return ExprError();
+ }
+ if (Ty.isInvalid())
+ Res = ExprError();
+ else
+ Res = Actions.ActOnVAArg(StartLoc, move(Expr), Ty.get(), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_offsetof: {
+ SourceLocation TypeLoc = Tok.getLocation();
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ // We must have at least one identifier here.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ // Keep track of the various subcomponents we see.
+ llvm::SmallVector<Action::OffsetOfComponent, 4> Comps;
+
+ Comps.push_back(Action::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
+
+ // FIXME: This loop leaks the index expressions on error.
+ while (1) {
+ if (Tok.is(tok::period)) {
+ // offsetof-member-designator: offsetof-member-designator '.' identifier
+ Comps.push_back(Action::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().LocStart = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocEnd = ConsumeToken();
+
+ } else if (Tok.is(tok::l_square)) {
+ // offsetof-member-designator: offsetof-member-design '[' expression ']'
+ Comps.push_back(Action::OffsetOfComponent());
+ Comps.back().isBrackets = true;
+ Comps.back().LocStart = ConsumeBracket();
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Res);
+ }
+ Comps.back().U.E = Res.release();
+
+ Comps.back().LocEnd =
+ MatchRHSPunctuation(tok::r_square, Comps.back().LocStart);
+ } else if (Tok.is(tok::r_paren)) {
+ if (Ty.isInvalid())
+ Res = ExprError();
+ else
+ Res = Actions.ActOnBuiltinOffsetOf(CurScope, StartLoc, TypeLoc,
+ Ty.get(), &Comps[0],
+ Comps.size(), ConsumeParen());
+ break;
+ } else {
+ // Error occurred.
+ return ExprError();
+ }
+ }
+ break;
+ }
+ case tok::kw___builtin_choose_expr: {
+ OwningExprResult Cond(ParseAssignmentExpression());
+ if (Cond.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Cond);
+ }
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ OwningExprResult Expr1(ParseAssignmentExpression());
+ if (Expr1.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Expr1);
+ }
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ OwningExprResult Expr2(ParseAssignmentExpression());
+ if (Expr2.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Expr2);
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ return ExprError();
+ }
+ Res = Actions.ActOnChooseExpr(StartLoc, move(Cond), move(Expr1),
+ move(Expr2), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_types_compatible_p:
+ TypeResult Ty1 = ParseTypeName();
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ TypeResult Ty2 = ParseTypeName();
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ return ExprError();
+ }
+
+ if (Ty1.isInvalid() || Ty2.isInvalid())
+ Res = ExprError();
+ else
+ Res = Actions.ActOnTypesCompatibleExpr(StartLoc, Ty1.get(), Ty2.get(),
+ ConsumeParen());
+ break;
+ }
+
+ // These can be followed by postfix-expr pieces because they are
+ // primary-expressions.
+ return ParsePostfixExpressionSuffix(move(Res));
+}
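One use of each builtin the switch recognizes, as a GNU C sketch with invented names; the comments state the conventional results, and nothing here comes from the patch itself.

#include <stdarg.h>
struct P { int x; int a[4]; };
int builtins(int n, ...) {
  va_list ap;
  va_start(ap, n);
  int v = __builtin_va_arg(ap, int);                       /* assignment-expr ',' type-name */
  va_end(ap);
  unsigned long off = __builtin_offsetof(struct P, a[2]);  /* member designator: a[2] */
  int w = __builtin_choose_expr(1, v, n);                  /* constant condition picks 'v' */
  int same = __builtin_types_compatible_p(int, unsigned);  /* 0 for these two types */
  return v + (int)off + w + same;
}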
+
+/// ParseParenExpression - This parses the unit that starts with a '(' token,
+/// based on what is allowed by ExprType. The actual thing parsed is returned
+/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
+/// not the parsed cast-expression.
+///
+/// primary-expression: [C99 6.5.1]
+/// '(' expression ')'
+/// [GNU] '(' compound-statement ')' (if !ParenExprOnly)
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+/// cast-expression: [C99 6.5.4]
+/// '(' type-name ')' cast-expression
+///
+Parser::OwningExprResult
+Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
+ TypeTy *&CastTy, SourceLocation &RParenLoc) {
+ assert(Tok.is(tok::l_paren) && "Not a paren expr!");
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
+ SourceLocation OpenLoc = ConsumeParen();
+ OwningExprResult Result(Actions, true);
+ bool isAmbiguousTypeId;
+ CastTy = 0;
+
+ if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::ext_gnu_statement_expr);
+ OwningStmtResult Stmt(ParseCompoundStatement(true));
+ ExprType = CompoundStmt;
+
+ // If the substmt parsed correctly, build the AST node.
+ if (!Stmt.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnStmtExpr(OpenLoc, move(Stmt), Tok.getLocation());
+
+ } else if (ExprType >= CompoundLiteral &&
+ isTypeIdInParens(isAmbiguousTypeId)) {
+
+ // Otherwise, this is a compound literal expression or cast expression.
+
+ // In C++, if the type-id is ambiguous we disambiguate based on context.
+    // If stopIfCastExpr is true, the context is a typeof/sizeof/alignof,
+    // in which case we should treat it as a type-id.
+    // If stopIfCastExpr is false, we need to determine the context past the
+    // parens, so we defer to ParseCXXAmbiguousParenExpression for that.
+ if (isAmbiguousTypeId && !stopIfCastExpr)
+ return ParseCXXAmbiguousParenExpression(ExprType, CastTy,
+ OpenLoc, RParenLoc);
+
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, OpenLoc);
+
+ if (Tok.is(tok::l_brace)) {
+ ExprType = CompoundLiteral;
+ return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
+ }
+
+ if (ExprType == CastExpr) {
+ // We parsed '(' type-name ')' and the thing after it wasn't a '{'.
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ CastTy = Ty.get();
+
+ if (stopIfCastExpr) {
+        // Note that this doesn't parse the subsequent cast-expression; it just
+        // returns the parsed type to the caller.
+ return OwningExprResult(Actions);
+ }
+
+ // Parse the cast-expression that follows it next.
+ // TODO: For cast expression with CastTy.
+ Result = ParseCastExpression(false);
+ if (!Result.isInvalid())
+ Result = Actions.ActOnCastExpr(OpenLoc, CastTy, RParenLoc,move(Result));
+ return move(Result);
+ }
+
+ Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
+ return ExprError();
+ } else {
+ Result = ParseExpression();
+ ExprType = SimpleExpr;
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnParenExpr(OpenLoc, Tok.getLocation(), move(Result));
+ }
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, OpenLoc);
+
+ return move(Result);
+}
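The four shapes this routine has to tell apart after eating the '(' look like this side by side, written as GNU C input (the statement expression and, in C90, the compound literal are the extensions diagnosed above):

struct Pt { int x, y; };
int paren_forms(int n) {
  int a = (n + 1);                    /* SimpleExpr: '(' expression ')' */
  int b = ({ int t = n; t * 2; });    /* CompoundStmt: GNU statement expression */
  struct Pt p = (struct Pt){ 1, 2 };  /* CompoundLiteral: '(' type-name ')' '{' ... '}' */
  double d = (double)n;               /* CastExpr: '(' type-name ')' cast-expression */
  return a + b + p.x + p.y + (int)d;
}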
+
+/// ParseCompoundLiteralExpression - We have parsed the parenthesized type-name
+/// and we are at the left brace.
+///
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+///
+Parser::OwningExprResult
+Parser::ParseCompoundLiteralExpression(TypeTy *Ty,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ assert(Tok.is(tok::l_brace) && "Not a compound literal!");
+ if (!getLang().C99) // Compound literals don't exist in C90.
+ Diag(LParenLoc, diag::ext_c99_compound_literal);
+ OwningExprResult Result = ParseInitializer();
+ if (!Result.isInvalid() && Ty)
+ return Actions.ActOnCompoundLiteral(LParenLoc, Ty, RParenLoc, move(Result));
+ return move(Result);
+}
+
+/// ParseStringLiteralExpression - This handles the various token types that
+/// form string literals, and also handles string concatenation [C99 5.1.1.2,
+/// translation phase #6].
+///
+/// primary-expression: [C99 6.5.1]
+/// string-literal
+Parser::OwningExprResult Parser::ParseStringLiteralExpression() {
+ assert(isTokenStringLiteral() && "Not a string literal!");
+
+ // String concat. Note that keywords like __func__ and __FUNCTION__ are not
+ // considered to be strings for concatenation purposes.
+ llvm::SmallVector<Token, 4> StringToks;
+
+ do {
+ StringToks.push_back(Tok);
+ ConsumeStringToken();
+ } while (isTokenStringLiteral());
+
+ // Pass the set of string tokens, ready for concatenation, to the actions.
+ return Actions.ActOnStringLiteral(&StringToks[0], StringToks.size());
+}
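A small illustration of the concatenation performed here: both initializers below denote the same string, the first built from two adjacent string-literal tokens collected into StringToks.

const char *one = "hello, " "world";   // two string-literal tokens, concatenated
const char *two = "hello, world";      // a single token with the same value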
+
+/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
+///
+/// argument-expression-list:
+/// assignment-expression
+/// argument-expression-list , assignment-expression
+///
+/// [C++] expression-list:
+/// [C++] assignment-expression
+/// [C++] expression-list , assignment-expression
+///
+bool Parser::ParseExpressionList(ExprListTy &Exprs, CommaLocsTy &CommaLocs) {
+ while (1) {
+ OwningExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid())
+ return true;
+
+ Exprs.push_back(Expr.release());
+
+ if (Tok.isNot(tok::comma))
+ return false;
+ // Move to the next argument, remember where the comma was.
+ CommaLocs.push_back(ConsumeToken());
+ }
+}
+
+/// ParseBlockId - Parse a block-id, which roughly looks like int (int x).
+///
+/// [clang] block-id:
+/// [clang] specifier-qualifier-list block-declarator
+///
+void Parser::ParseBlockId() {
+ // Parse the specifier-qualifier-list piece.
+ DeclSpec DS;
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the block-declarator.
+ Declarator DeclaratorInfo(DS, Declarator::BlockLiteralContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // We do this for: ^ __attribute__((noreturn)) {, as DS has the attributes.
+ DeclaratorInfo.AddAttributes(DS.TakeAttributes(),
+ SourceLocation());
+
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ DeclaratorInfo.AddAttributes(AttrList, Loc);
+ }
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(DeclaratorInfo, CurScope);
+}
+
+/// ParseBlockLiteralExpression - Parse a block literal, which roughly looks
+/// like ^(int x){ return x+1; }
+///
+/// block-literal:
+/// [clang] '^' block-args[opt] compound-statement
+/// [clang] '^' block-id compound-statement
+/// [clang] block-args:
+/// [clang] '(' parameter-list ')'
+///
+Parser::OwningExprResult Parser::ParseBlockLiteralExpression() {
+ assert(Tok.is(tok::caret) && "block literal starts with ^");
+ SourceLocation CaretLoc = ConsumeToken();
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), CaretLoc,
+ "block literal parsing");
+
+ // Enter a scope to hold everything within the block. This includes the
+ // argument decls, decls within the compound expression, etc. This also
+ // allows determining whether a variable reference inside the block is
+ // within or outside of the block.
+ ParseScope BlockScope(this, Scope::BlockScope | Scope::FnScope |
+ Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockStart(CaretLoc, CurScope);
+
+ // Parse the return type if present.
+ DeclSpec DS;
+ Declarator ParamInfo(DS, Declarator::BlockLiteralContext);
+ // FIXME: Since the return type isn't actually parsed, it can't be used to
+ // fill ParamInfo with an initial valid range, so do it manually.
+ ParamInfo.SetSourceRange(SourceRange(Tok.getLocation(), Tok.getLocation()));
+
+ // If this block has arguments, parse them. There is no ambiguity here with
+ // the expression case, because the expression case requires a parameter list.
+ if (Tok.is(tok::l_paren)) {
+ ParseParenDeclarator(ParamInfo);
+ // Parse the pieces after the identifier as if we had "int(...)".
+ // SetIdentifier sets the source range end, but in this case we're past
+ // that location.
+ SourceLocation Tmp = ParamInfo.getSourceRange().getEnd();
+ ParamInfo.SetIdentifier(0, CaretLoc);
+ ParamInfo.SetRangeEnd(Tmp);
+ if (ParamInfo.isInvalidType()) {
+ // If there was an error parsing the arguments, they may have
+ // tried to use ^(x+y) which requires an argument list. Just
+ // skip the whole block literal.
+ Actions.ActOnBlockError(CaretLoc, CurScope);
+ return ExprError();
+ }
+
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ ParamInfo.AddAttributes(AttrList, Loc);
+ }
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(ParamInfo, CurScope);
+ } else if (!Tok.is(tok::l_brace)) {
+ ParseBlockId();
+ } else {
+ // Otherwise, pretend we saw (void).
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false,
+ SourceLocation(),
+ 0, 0, 0,
+ false, SourceLocation(),
+ false, 0, 0, 0,
+ CaretLoc, ParamInfo),
+ CaretLoc);
+
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ ParamInfo.AddAttributes(AttrList, Loc);
+ }
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(ParamInfo, CurScope);
+ }
+
+
+ OwningExprResult Result(Actions, true);
+ if (!Tok.is(tok::l_brace)) {
+ // Saw something like: ^expr
+ Diag(Tok, diag::err_expected_expression);
+ Actions.ActOnBlockError(CaretLoc, CurScope);
+ return ExprError();
+ }
+
+ OwningStmtResult Stmt(ParseCompoundStatementBody());
+ if (!Stmt.isInvalid())
+ Result = Actions.ActOnBlockStmtExpr(CaretLoc, move(Stmt), CurScope);
+ else
+ Actions.ActOnBlockError(CaretLoc, CurScope);
+ return move(Result);
+}
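An illustrative sketch of the block spellings handled above; blocks are a Clang extension (typically built with -fblocks), and the IntFn typedef is invented for the example.

typedef int (^IntFn)(int);
void blocks(void) {
  IntFn inc = ^(int x) { return x + 1; };      // '^' block-args compound-statement
  IntFn dbl = ^int (int x) { return x * 2; };  // '^' block-id compound-statement
  void (^simple)(void) = ^{ };                 // no '(' or block-id: treated as (void)
  inc(1); dbl(2); simple();
}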
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
new file mode 100644
index 0000000..681c6ad
--- /dev/null
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -0,0 +1,1166 @@
+//===--- ParseExprCXX.cpp - C++ Expression Parsing ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expression parsing implementation for C++.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/DeclSpec.h"
+using namespace clang;
+
+/// ParseOptionalCXXScopeSpecifier - Parse global scope or
+/// nested-name-specifier if present. Returns true if a nested-name-specifier
+/// was parsed from the token stream. Note that this routine will not parse
+/// ::new or ::delete, it will just leave them in the token stream.
+///
+/// '::'[opt] nested-name-specifier
+/// '::'
+///
+/// nested-name-specifier:
+/// type-name '::'
+/// namespace-name '::'
+/// nested-name-specifier identifier '::'
+/// nested-name-specifier 'template'[opt] simple-template-id '::' [TODO]
+///
+bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS) {
+ assert(getLang().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+
+ if (Tok.is(tok::annot_cxxscope)) {
+ SS.setScopeRep(Tok.getAnnotationValue());
+ SS.setRange(Tok.getAnnotationRange());
+ ConsumeToken();
+ return true;
+ }
+
+ bool HasScopeSpecifier = false;
+
+ if (Tok.is(tok::coloncolon)) {
+ // ::new and ::delete aren't nested-name-specifiers.
+ tok::TokenKind NextKind = NextToken().getKind();
+ if (NextKind == tok::kw_new || NextKind == tok::kw_delete)
+ return false;
+
+ // '::' - Global scope qualifier.
+ SourceLocation CCLoc = ConsumeToken();
+ SS.setBeginLoc(CCLoc);
+ SS.setScopeRep(Actions.ActOnCXXGlobalScopeSpecifier(CurScope, CCLoc));
+ SS.setEndLoc(CCLoc);
+ HasScopeSpecifier = true;
+ }
+
+ while (true) {
+ // nested-name-specifier:
+ // type-name '::'
+ // namespace-name '::'
+ // nested-name-specifier identifier '::'
+ if (Tok.is(tok::identifier) && NextToken().is(tok::coloncolon)) {
+ // We have an identifier followed by a '::'. Lookup this name
+ // as the name in a nested-name-specifier.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+ assert(Tok.is(tok::coloncolon) && "NextToken() not working properly!");
+ SourceLocation CCLoc = ConsumeToken();
+
+ if (!HasScopeSpecifier) {
+ SS.setBeginLoc(IdLoc);
+ HasScopeSpecifier = true;
+ }
+
+ if (SS.isInvalid())
+ continue;
+
+ SS.setScopeRep(
+ Actions.ActOnCXXNestedNameSpecifier(CurScope, SS, IdLoc, CCLoc, *II));
+ SS.setEndLoc(CCLoc);
+ continue;
+ }
+
+ // nested-name-specifier:
+ // type-name '::'
+ // nested-name-specifier 'template'[opt] simple-template-id '::'
+ if ((Tok.is(tok::identifier) && NextToken().is(tok::less)) ||
+ Tok.is(tok::kw_template)) {
+ // Parse the optional 'template' keyword, then make sure we have
+ // 'identifier <' after it.
+ if (Tok.is(tok::kw_template)) {
+ SourceLocation TemplateKWLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok.getLocation(),
+ diag::err_id_after_template_in_nested_name_spec)
+ << SourceRange(TemplateKWLoc);
+ break;
+ }
+
+ if (NextToken().isNot(tok::less)) {
+ Diag(NextToken().getLocation(),
+ diag::err_less_after_template_name_in_nested_name_spec)
+ << Tok.getIdentifierInfo()->getName()
+ << SourceRange(TemplateKWLoc, Tok.getLocation());
+ break;
+ }
+
+ TemplateTy Template
+ = Actions.ActOnDependentTemplateName(TemplateKWLoc,
+ *Tok.getIdentifierInfo(),
+ Tok.getLocation(),
+ SS);
+ AnnotateTemplateIdToken(Template, TNK_Dependent_template_name,
+ &SS, TemplateKWLoc, false);
+ continue;
+ }
+
+ TemplateTy Template;
+ TemplateNameKind TNK = Actions.isTemplateName(*Tok.getIdentifierInfo(),
+ CurScope, Template, &SS);
+ if (TNK) {
+        // We have found a template name, so annotate this token
+ // with a template-id annotation. We do not permit the
+ // template-id to be translated into a type annotation,
+ // because some clients (e.g., the parsing of class template
+ // specializations) still want to see the original template-id
+ // token.
+ AnnotateTemplateIdToken(Template, TNK, &SS, SourceLocation(), false);
+ continue;
+ }
+ }
+
+ if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) {
+ // We have
+ //
+ // simple-template-id '::'
+ //
+ // So we need to check whether the simple-template-id is of the
+ // right kind (it should name a type or be dependent), and then
+ // convert it into a type within the nested-name-specifier.
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+
+ if (TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) {
+ AnnotateTemplateIdTokenAsType(&SS);
+ SS.setScopeRep(0);
+
+ assert(Tok.is(tok::annot_typename) &&
+ "AnnotateTemplateIdTokenAsType isn't working");
+ Token TypeToken = Tok;
+ ConsumeToken();
+ assert(Tok.is(tok::coloncolon) && "NextToken() not working properly!");
+ SourceLocation CCLoc = ConsumeToken();
+
+ if (!HasScopeSpecifier) {
+ SS.setBeginLoc(TypeToken.getLocation());
+ HasScopeSpecifier = true;
+ }
+
+ if (TypeToken.getAnnotationValue())
+ SS.setScopeRep(
+ Actions.ActOnCXXNestedNameSpecifier(CurScope, SS,
+ TypeToken.getAnnotationValue(),
+ TypeToken.getAnnotationRange(),
+ CCLoc));
+ else
+ SS.setScopeRep(0);
+ SS.setEndLoc(CCLoc);
+ continue;
+ } else
+ assert(false && "FIXME: Only type template names supported here");
+ }
+
+ // We don't have any tokens that form the beginning of a
+ // nested-name-specifier, so we're done.
+ break;
+ }
+
+ return HasScopeSpecifier;
+}
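An illustrative C++ snippet containing the scope-specifier shapes the loop above walks through, one '::' at a time (all names are made up for the example):

namespace outer { namespace inner { struct T { static int v; }; } }
int outer::inner::T::v = 0;
template <typename X> struct Box { typedef X type; };

int read_v() {
  Box<int>::type boxed = ::outer::inner::T::v;  // global '::', namespace-names,
                                                // a type-name, and a
                                                // simple-template-id '::'
  return boxed;
}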
+
+/// ParseCXXIdExpression - Handle id-expression.
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id
+///
+/// unqualified-id:
+/// identifier
+/// operator-function-id
+/// conversion-function-id [TODO]
+/// '~' class-name [TODO]
+/// template-id [TODO]
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' identifier
+/// '::' operator-function-id
+/// '::' template-id [TODO]
+///
+/// nested-name-specifier:
+/// type-name '::'
+/// namespace-name '::'
+/// nested-name-specifier identifier '::'
+/// nested-name-specifier 'template'[opt] simple-template-id '::' [TODO]
+///
+/// NOTE: The standard specifies that, for qualified-id, the parser does not
+/// expect:
+///
+/// '::' conversion-function-id
+/// '::' '~' class-name
+///
+/// This may cause a slight inconsistency in diagnostics:
+///
+/// class C {};
+/// namespace A {}
+/// void f() {
+/// :: A :: ~ C(); // Some Sema error about using destructor with a
+/// // namespace.
+/// :: ~ C(); // Some Parser error like 'unexpected ~'.
+/// }
+///
+/// We simplify the parser a bit and make it work like:
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' unqualified-id
+///
+/// That way Sema can handle and report similar errors for namespaces and the
+/// global scope.
+///
+/// The isAddressOfOperand parameter indicates that this id-expression is a
+/// direct operand of the address-of operator. This is, besides member contexts,
+/// the only place where a qualified-id naming a non-static class member may
+/// appear.
+///
+Parser::OwningExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
+ // qualified-id:
+ // '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+ // '::' unqualified-id
+ //
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS);
+
+ // unqualified-id:
+ // identifier
+ // operator-function-id
+ // conversion-function-id
+ // '~' class-name [TODO]
+ // template-id [TODO]
+ //
+ switch (Tok.getKind()) {
+ default:
+ return ExprError(Diag(Tok, diag::err_expected_unqualified_id));
+
+ case tok::identifier: {
+ // Consume the identifier so that we can see if it is followed by a '('.
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+ SourceLocation L = ConsumeToken();
+ return Actions.ActOnIdentifierExpr(CurScope, L, II, Tok.is(tok::l_paren),
+ &SS, isAddressOfOperand);
+ }
+
+ case tok::kw_operator: {
+ SourceLocation OperatorLoc = Tok.getLocation();
+ if (OverloadedOperatorKind Op = TryParseOperatorFunctionId())
+ return Actions.ActOnCXXOperatorFunctionIdExpr(
+ CurScope, OperatorLoc, Op, Tok.is(tok::l_paren), SS,
+ isAddressOfOperand);
+ if (TypeTy *Type = ParseConversionFunctionId())
+ return Actions.ActOnCXXConversionFunctionExpr(CurScope, OperatorLoc, Type,
+ Tok.is(tok::l_paren), SS,
+ isAddressOfOperand);
+
+ // We already complained about a bad conversion-function-id,
+ // above.
+ return ExprError();
+ }
+
+ } // switch.
+
+ assert(0 && "The switch was supposed to take care everything.");
+}
+
+/// ParseCXXCasts - This handles the various ways to cast expressions to another
+/// type.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'dynamic_cast' '<' type-name '>' '(' expression ')'
+/// 'static_cast' '<' type-name '>' '(' expression ')'
+/// 'reinterpret_cast' '<' type-name '>' '(' expression ')'
+/// 'const_cast' '<' type-name '>' '(' expression ')'
+///
+Parser::OwningExprResult Parser::ParseCXXCasts() {
+ tok::TokenKind Kind = Tok.getKind();
+ const char *CastName = 0; // For error messages
+
+ switch (Kind) {
+ default: assert(0 && "Unknown C++ cast!"); abort();
+ case tok::kw_const_cast: CastName = "const_cast"; break;
+ case tok::kw_dynamic_cast: CastName = "dynamic_cast"; break;
+ case tok::kw_reinterpret_cast: CastName = "reinterpret_cast"; break;
+ case tok::kw_static_cast: CastName = "static_cast"; break;
+ }
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LAngleBracketLoc = Tok.getLocation();
+
+ if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
+ return ExprError();
+
+ TypeResult CastTy = ParseTypeName();
+ SourceLocation RAngleBracketLoc = Tok.getLocation();
+
+ if (ExpectAndConsume(tok::greater, diag::err_expected_greater))
+ return ExprError(Diag(LAngleBracketLoc, diag::note_matching) << "<");
+
+ SourceLocation LParenLoc = Tok.getLocation(), RParenLoc;
+
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, CastName))
+ return ExprError();
+
+ OwningExprResult Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren);
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ if (!Result.isInvalid() && !CastTy.isInvalid())
+ Result = Actions.ActOnCXXNamedCast(OpLoc, Kind,
+ LAngleBracketLoc, CastTy.get(),
+ RAngleBracketLoc,
+ LParenLoc, move(Result), RParenLoc);
+
+ return move(Result);
+}
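All four named casts funnel through this one routine, with only CastName and Kind differing; for example (illustrative C++):

struct Base { virtual ~Base() {} };
struct Derived : Base {};
void casts(Base *b, const int *ci) {
  Derived *d   = dynamic_cast<Derived *>(b);   // 'dynamic_cast' '<' type '>' '(' e ')'
  long     l   = static_cast<long>(42);
  int     *i   = const_cast<int *>(ci);
  void    *raw = reinterpret_cast<void *>(i);
  (void)d; (void)l; (void)raw;
}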
+
+/// ParseCXXTypeid - This handles the C++ typeid expression.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'typeid' '(' expression ')'
+/// 'typeid' '(' type-id ')'
+///
+Parser::OwningExprResult Parser::ParseCXXTypeid() {
+ assert(Tok.is(tok::kw_typeid) && "Not 'typeid'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LParenLoc = Tok.getLocation();
+ SourceLocation RParenLoc;
+
+ // typeid expressions are always parenthesized.
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "typeid"))
+ return ExprError();
+
+ OwningExprResult Result(Actions);
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/true,
+ Ty.get(), RParenLoc);
+ } else {
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren);
+ else {
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/false,
+ Result.release(), RParenLoc);
+ }
+ }
+
+ return move(Result);
+}
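Both operand forms of 'typeid', as illustrative input (needs <typeinfo> and RTTI enabled):

#include <typeinfo>
struct Shape { virtual ~Shape() {} };
bool same_dynamic_type(const Shape &a, const Shape &b) {
  return typeid(a) == typeid(b) &&       // 'typeid' '(' expression ')'
         typeid(Shape) != typeid(int);   // 'typeid' '(' type-id ')'
}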
+
+/// ParseCXXBoolLiteral - This handles the C++ Boolean literals.
+///
+/// boolean-literal: [C++ 2.13.5]
+/// 'true'
+/// 'false'
+Parser::OwningExprResult Parser::ParseCXXBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnCXXBoolLiteral(ConsumeToken(), Kind);
+}
+
+/// ParseThrowExpression - This handles the C++ throw expression.
+///
+/// throw-expression: [C++ 15]
+/// 'throw' assignment-expression[opt]
+Parser::OwningExprResult Parser::ParseThrowExpression() {
+ assert(Tok.is(tok::kw_throw) && "Not throw!");
+ SourceLocation ThrowLoc = ConsumeToken(); // Eat the throw token.
+
+ // If the current token isn't the start of an assignment-expression,
+ // then the expression is not present. This handles things like:
+ // "C ? throw : (void)42", which is crazy but legal.
+ switch (Tok.getKind()) { // FIXME: move this predicate somewhere common.
+ case tok::semi:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::r_brace:
+ case tok::colon:
+ case tok::comma:
+ return Actions.ActOnCXXThrow(ThrowLoc, ExprArg(Actions));
+
+ default:
+ OwningExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) return move(Expr);
+ return Actions.ActOnCXXThrow(ThrowLoc, move(Expr));
+ }
+}
+
+/// ParseCXXThis - This handles the C++ 'this' pointer.
+///
+/// C++ 9.3.2: In the body of a non-static member function, the keyword this is
+/// a non-lvalue expression whose value is the address of the object for which
+/// the function is called.
+Parser::OwningExprResult Parser::ParseCXXThis() {
+ assert(Tok.is(tok::kw_this) && "Not 'this'!");
+ SourceLocation ThisLoc = ConsumeToken();
+ return Actions.ActOnCXXThis(ThisLoc);
+}
+
+/// ParseCXXTypeConstructExpression - Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+///
+/// postfix-expression: [C++ 5.2p1]
+/// simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// typename-specifier '(' expression-list[opt] ')' [TODO]
+///
+Parser::OwningExprResult
+Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeTy *TypeRep = Actions.ActOnTypeName(CurScope, DeclaratorInfo).get();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('!");
+ SourceLocation LParenLoc = ConsumeParen();
+
+ ExprVector Exprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(Exprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ }
+
+ // Match the ')'.
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
+ "Unexpected number of commas!");
+ return Actions.ActOnCXXTypeConstructExpr(DS.getSourceRange(), TypeRep,
+ LParenLoc, move_arg(Exprs),
+ CommaLocs.data(), RParenLoc);
+}
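Illustrative inputs for the three flavours the doc comment mentions: a function-style cast, value initialization with an empty expression-list, and class type construction (Pair is invented).

struct Pair { Pair(int a, int b) : x(a), y(b) {} int x, y; };
int construct() {
  int  n = int(3.5);     // function-style cast: simple-type-specifier '(' expr ')'
  int  z = int();        // value initialization: empty expression-list[opt]
  Pair p = Pair(1, 2);   // class type construction with an expression-list
  return n + z + p.x + p.y;
}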
+
+/// ParseCXXCondition - if/switch/while/for condition expression.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
+///
+Parser::OwningExprResult Parser::ParseCXXCondition() {
+ if (!isCXXConditionDeclaration())
+ return ParseExpression(); // expression
+
+ SourceLocation StartLoc = Tok.getLocation();
+
+ // type-specifier-seq
+ DeclSpec DS;
+ ParseSpecifierQualifierList(DS);
+
+ // declarator
+ Declarator DeclaratorInfo(DS, Declarator::ConditionContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // simple-asm-expr[opt]
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ OwningExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi);
+ return ExprError();
+ }
+ DeclaratorInfo.setAsmLabel(AsmLabel.release());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
+ // If attributes are present, parse them.
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseAttributes(&Loc);
+ DeclaratorInfo.AddAttributes(AttrList, Loc);
+ }
+
+ // '=' assignment-expression
+ if (Tok.isNot(tok::equal))
+ return ExprError(Diag(Tok, diag::err_expected_equal_after_declarator));
+ SourceLocation EqualLoc = ConsumeToken();
+ OwningExprResult AssignExpr(ParseAssignmentExpression());
+ if (AssignExpr.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnCXXConditionDeclarationExpr(CurScope, StartLoc,
+ DeclaratorInfo,EqualLoc,
+ move(AssignExpr));
+}
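A hedged sketch contrasting the declaration form of a condition with the plain expression form; next() is an invented helper.

int next() { static int k = 6; return k > 0 ? k-- : 0; }

void drain() {
  while (int n = next())      // type-specifier-seq declarator '=' assignment-expr
    (void)n;
  int m;
  while ((m = next()) != 0)   // plain expression: handled by ParseExpression()
    ;
}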
+
+/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
+/// This should only be called when the current token is known to be part of
+/// simple-type-specifier.
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template' simple-template-id [TODO]
+/// char
+/// wchar_t
+/// bool
+/// short
+/// int
+/// long
+/// signed
+/// unsigned
+/// float
+/// double
+/// void
+/// [GNU] typeof-specifier
+/// [C++0x] auto [TODO]
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
+void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
+ DS.SetRangeStart(Tok.getLocation());
+ const char *PrevSpec;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+ case tok::coloncolon: // ::foo::bar
+ assert(0 && "Annotation token should already be formed!");
+ default:
+ assert(0 && "Not a simple-type-specifier token!");
+ abort();
+
+ // type-name
+ case tok::annot_typename: {
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ Tok.getAnnotationValue());
+ break;
+ }
+
+ // builtin types
+ case tok::kw_short:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec);
+ break;
+ case tok::kw_long:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec);
+ break;
+ case tok::kw_signed:
+ DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec);
+ break;
+ case tok::kw_unsigned:
+ DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec);
+ break;
+ case tok::kw_void:
+ DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec);
+ break;
+ case tok::kw_char:
+ DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec);
+ break;
+ case tok::kw_int:
+ DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec);
+ break;
+ case tok::kw_float:
+ DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec);
+ break;
+ case tok::kw_double:
+ DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec);
+ break;
+ case tok::kw_wchar_t:
+ DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec);
+ break;
+ case tok::kw_bool:
+ DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec);
+ break;
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ DS.Finish(Diags, PP);
+ return;
+ }
+ if (Tok.is(tok::annot_typename))
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ else
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ DS.Finish(Diags, PP);
+}
+
+/// ParseCXXTypeSpecifierSeq - Parse a C++ type-specifier-seq (C++
+/// [dcl.name]), which is a non-empty sequence of type-specifiers,
+/// e.g., "const short int". Note that the DeclSpec is *not* finished
+/// by parsing the type-specifier-seq, because these sequences are
+/// typically followed by some form of declarator. Returns true and
+/// emits diagnostics if this is not a type-specifier-seq, false
+/// otherwise.
+///
+/// type-specifier-seq: [C++ 8.1]
+/// type-specifier type-specifier-seq[opt]
+///
+bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
+ DS.SetRangeStart(Tok.getLocation());
+ const char *PrevSpec = 0;
+ int isInvalid = 0;
+
+ // Parse one or more of the type specifiers.
+ if (!ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec)) {
+ Diag(Tok, diag::err_operator_missing_type_specifier);
+ return true;
+ }
+
+ while (ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec)) ;
+
+ return false;
+}
+
+/// TryParseOperatorFunctionId - Attempts to parse a C++ overloaded
+/// operator name (C++ [over.oper]). If successful, returns the
+/// predefined identifier that corresponds to that overloaded
+/// operator. Otherwise, returns NULL and does not consume any tokens.
+///
+/// operator-function-id: [C++ 13.5]
+/// 'operator' operator
+///
+/// operator: one of
+/// new delete new[] delete[]
+/// + - * / % ^ & | ~
+/// ! = < > += -= *= /= %=
+/// ^= &= |= << >> >>= <<= == !=
+/// <= >= && || ++ -- , ->* ->
+/// () []
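+///
+/// For example, "operator new[]" yields OO_Array_New, "operator()" yields
+/// OO_Call, and "operator[]" yields OO_Subscript; the remaining operators
+/// map to the corresponding entries in OperatorKinds.def.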
+OverloadedOperatorKind
+Parser::TryParseOperatorFunctionId(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw_operator) && "Expected 'operator' keyword");
+ SourceLocation Loc;
+
+ OverloadedOperatorKind Op = OO_None;
+ switch (NextToken().getKind()) {
+ case tok::kw_new:
+ ConsumeToken(); // 'operator'
+ Loc = ConsumeToken(); // 'new'
+ if (Tok.is(tok::l_square)) {
+ ConsumeBracket(); // '['
+ Loc = Tok.getLocation();
+ ExpectAndConsume(tok::r_square, diag::err_expected_rsquare); // ']'
+ Op = OO_Array_New;
+ } else {
+ Op = OO_New;
+ }
+ if (EndLoc)
+ *EndLoc = Loc;
+ return Op;
+
+ case tok::kw_delete:
+ ConsumeToken(); // 'operator'
+ Loc = ConsumeToken(); // 'delete'
+ if (Tok.is(tok::l_square)) {
+ ConsumeBracket(); // '['
+ Loc = Tok.getLocation();
+ ExpectAndConsume(tok::r_square, diag::err_expected_rsquare); // ']'
+ Op = OO_Array_Delete;
+ } else {
+ Op = OO_Delete;
+ }
+ if (EndLoc)
+ *EndLoc = Loc;
+ return Op;
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case tok::Token: Op = OO_##Name; break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case tok::l_paren:
+ ConsumeToken(); // 'operator'
+ ConsumeParen(); // '('
+ Loc = Tok.getLocation();
+ ExpectAndConsume(tok::r_paren, diag::err_expected_rparen); // ')'
+ if (EndLoc)
+ *EndLoc = Loc;
+ return OO_Call;
+
+ case tok::l_square:
+ ConsumeToken(); // 'operator'
+ ConsumeBracket(); // '['
+ Loc = Tok.getLocation();
+ ExpectAndConsume(tok::r_square, diag::err_expected_rsquare); // ']'
+ if (EndLoc)
+ *EndLoc = Loc;
+ return OO_Subscript;
+
+ default:
+ return OO_None;
+ }
+
+ ConsumeToken(); // 'operator'
+ Loc = ConsumeAnyToken(); // the operator itself
+ if (EndLoc)
+ *EndLoc = Loc;
+ return Op;
+}
+
+/// ParseConversionFunctionId - Parse a C++ conversion-function-id,
+/// which expresses the name of a user-defined conversion operator
+/// (C++ [class.conv.fct]p1). Returns the type that this operator is
+/// specifying a conversion for, or NULL if there was an error.
+///
+/// conversion-function-id: [C++ 12.3.2]
+/// operator conversion-type-id
+///
+/// conversion-type-id:
+/// type-specifier-seq conversion-declarator[opt]
+///
+/// conversion-declarator:
+/// ptr-operator conversion-declarator[opt]
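+///
+/// For example, in "operator const char *", "const char" is the
+/// type-specifier-seq and "*" is the conversion-declarator.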
+Parser::TypeTy *Parser::ParseConversionFunctionId(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw_operator) && "Expected 'operator' keyword");
+ ConsumeToken(); // 'operator'
+
+ // Parse the type-specifier-seq.
+ DeclSpec DS;
+ if (ParseCXXTypeSpecifierSeq(DS))
+ return 0;
+
+ // Parse the conversion-declarator, which is merely a sequence of
+ // ptr-operators.
+ Declarator D(DS, Declarator::TypeNameContext);
+ ParseDeclaratorInternal(D, /*DirectDeclParser=*/0);
+ if (EndLoc)
+ *EndLoc = D.getSourceRange().getEnd();
+
+ // Finish up the type.
+ Action::TypeResult Result = Actions.ActOnTypeName(CurScope, D);
+ if (Result.isInvalid())
+ return 0;
+ else
+ return Result.get();
+}
+
+/// ParseCXXNewExpression - Parse a C++ new-expression. New is used to allocate
+/// memory in a typesafe manner and call constructors.
+///
+/// This method is called to parse the new expression after the optional :: has
+/// already been parsed. If the :: was present, "UseGlobal" is true and "Start"
+/// is its location. Otherwise, "Start" is the location of the 'new' token.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
+/// new-type-id:
+/// type-specifier-seq new-declarator[opt]
+///
+/// new-declarator:
+/// ptr-operator new-declarator[opt]
+/// direct-new-declarator
+///
+/// new-initializer:
+/// '(' expression-list[opt] ')'
+/// [C++0x] braced-init-list [TODO]
+///
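+/// For example, "new (buf) int[n]" has the new-placement "(buf)" and the
+/// new-type-id "int[n]", while "::new Foo(1, 2)" uses the global operator
+/// new and has the new-initializer "(1, 2)".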
+Parser::OwningExprResult
+Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_new) && "expected 'new' token");
+ ConsumeToken(); // Consume 'new'
+
+ // A '(' now can be a new-placement or the '(' wrapping the type-id in the
+ // second form of new-expression. It can't be a new-type-id.
+
+ ExprVector PlacementArgs(Actions);
+ SourceLocation PlacementLParen, PlacementRParen;
+
+ bool ParenTypeId;
+ DeclSpec DS;
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ if (Tok.is(tok::l_paren)) {
+ // If it turns out to be a placement, we change the type location.
+ PlacementLParen = ConsumeParen();
+ if (ParseExpressionListOrTypeId(PlacementArgs, DeclaratorInfo)) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ PlacementRParen = MatchRHSPunctuation(tok::r_paren, PlacementLParen);
+ if (PlacementRParen.isInvalid()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ if (PlacementArgs.empty()) {
+ // Reset the placement locations. There was no placement.
+ PlacementLParen = PlacementRParen = SourceLocation();
+ ParenTypeId = true;
+ } else {
+ // We still need the type.
+ if (Tok.is(tok::l_paren)) {
+ SourceLocation LParen = ConsumeParen();
+ ParseSpecifierQualifierList(DS);
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclarator(DeclaratorInfo);
+ MatchRHSPunctuation(tok::r_paren, LParen);
+ ParenTypeId = true;
+ } else {
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ ParenTypeId = false;
+ }
+ }
+ } else {
+ // A new-type-id is a simplified type-id, where essentially the
+ // direct-declarator is replaced by a direct-new-declarator.
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ ParenTypeId = false;
+ }
+ if (DeclaratorInfo.isInvalidType()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ ExprVector ConstructorArgs(Actions);
+ SourceLocation ConstructorLParen, ConstructorRParen;
+
+ if (Tok.is(tok::l_paren)) {
+ ConstructorLParen = ConsumeParen();
+ if (Tok.isNot(tok::r_paren)) {
+ CommaLocsTy CommaLocs;
+ if (ParseExpressionList(ConstructorArgs, CommaLocs)) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+ }
+ ConstructorRParen = MatchRHSPunctuation(tok::r_paren, ConstructorLParen);
+ if (ConstructorRParen.isInvalid()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+ }
+
+ return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
+ move_arg(PlacementArgs), PlacementRParen,
+ ParenTypeId, DeclaratorInfo, ConstructorLParen,
+ move_arg(ConstructorArgs), ConstructorRParen);
+}
+
+/// ParseDirectNewDeclarator - Parses a direct-new-declarator. Intended to be
+/// passed to ParseDeclaratorInternal.
+///
+/// direct-new-declarator:
+/// '[' expression ']'
+/// direct-new-declarator '[' constant-expression ']'
+///
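+/// For example, in "new int[n][4]", the first dimension "n" is parsed as an
+/// expression and the second dimension "4" as a constant-expression.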
+void Parser::ParseDirectNewDeclarator(Declarator &D) {
+ // Parse the array dimensions.
+ bool first = true;
+ while (Tok.is(tok::l_square)) {
+ SourceLocation LLoc = ConsumeBracket();
+ OwningExprResult Size(first ? ParseExpression()
+ : ParseConstantExpression());
+ if (Size.isInvalid()) {
+ // Recover
+ SkipUntil(tok::r_square);
+ return;
+ }
+ first = false;
+
+ SourceLocation RLoc = MatchRHSPunctuation(tok::r_square, LLoc);
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, /*static=*/false, /*star=*/false,
+ Size.release(), LLoc),
+ RLoc);
+
+ if (RLoc.isInvalid())
+ return;
+ }
+}
+
+/// ParseExpressionListOrTypeId - Parse either an expression-list or a type-id.
+/// This ambiguity appears in the syntax of the C++ new operator.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
+bool Parser::ParseExpressionListOrTypeId(ExprListTy &PlacementArgs,
+ Declarator &D) {
+ // The '(' was already consumed.
+ if (isTypeIdInParens()) {
+ ParseSpecifierQualifierList(D.getMutableDeclSpec());
+ D.SetSourceRange(D.getDeclSpec().getSourceRange());
+ ParseDeclarator(D);
+ return D.isInvalidType();
+ }
+
+ // It's not a type, it has to be an expression list.
+ // Discard the comma locations - ActOnCXXNew has enough parameters.
+ CommaLocsTy CommaLocs;
+ return ParseExpressionList(PlacementArgs, CommaLocs);
+}
+
+/// ParseCXXDeleteExpression - Parse a C++ delete-expression. Delete is used
+/// to free memory allocated by new.
+///
+/// This method is called to parse the 'delete' expression after the optional
+/// '::' has already been parsed. If the '::' was present, "UseGlobal" is true
+/// and "Start" is its location. Otherwise, "Start" is the location of the
+/// 'delete' token.
+///
+/// delete-expression:
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
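+///
+/// For example, "delete p" and "::delete [] p" are both handled here; in the
+/// second form the '[' ']' pair selects the array form of delete.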
+Parser::OwningExprResult
+Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_delete) && "Expected 'delete' keyword");
+ ConsumeToken(); // Consume 'delete'
+
+ // Array delete?
+ bool ArrayDelete = false;
+ if (Tok.is(tok::l_square)) {
+ ArrayDelete = true;
+ SourceLocation LHS = ConsumeBracket();
+ SourceLocation RHS = MatchRHSPunctuation(tok::r_square, LHS);
+ if (RHS.isInvalid())
+ return ExprError();
+ }
+
+ OwningExprResult Operand(ParseCastExpression(false));
+ if (Operand.isInvalid())
+ return move(Operand);
+
+ return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, move(Operand));
+}
+
+static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind)
+{
+ switch(kind) {
+ default: assert(false && "Not a known unary type trait.");
+ case tok::kw___has_nothrow_assign: return UTT_HasNothrowAssign;
+ case tok::kw___has_nothrow_copy: return UTT_HasNothrowCopy;
+ case tok::kw___has_nothrow_constructor: return UTT_HasNothrowConstructor;
+ case tok::kw___has_trivial_assign: return UTT_HasTrivialAssign;
+ case tok::kw___has_trivial_copy: return UTT_HasTrivialCopy;
+ case tok::kw___has_trivial_constructor: return UTT_HasTrivialConstructor;
+ case tok::kw___has_trivial_destructor: return UTT_HasTrivialDestructor;
+ case tok::kw___has_virtual_destructor: return UTT_HasVirtualDestructor;
+ case tok::kw___is_abstract: return UTT_IsAbstract;
+ case tok::kw___is_class: return UTT_IsClass;
+ case tok::kw___is_empty: return UTT_IsEmpty;
+ case tok::kw___is_enum: return UTT_IsEnum;
+ case tok::kw___is_pod: return UTT_IsPOD;
+ case tok::kw___is_polymorphic: return UTT_IsPolymorphic;
+ case tok::kw___is_union: return UTT_IsUnion;
+ }
+}
+
+/// ParseUnaryTypeTrait - Parse the built-in unary type-trait
+/// pseudo-functions that allow implementation of the TR1/C++0x type traits
+/// templates.
+///
+/// primary-expression:
+/// [GNU] unary-type-trait '(' type-id ')'
+///
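+/// For example, "__is_pod(int)" and "__has_trivial_destructor(S)" are parsed
+/// here once the trait keyword has been recognized.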
+Parser::OwningExprResult Parser::ParseUnaryTypeTrait()
+{
+ UnaryTypeTrait UTT = UnaryTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ SourceLocation LParen = Tok.getLocation();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen))
+ return ExprError();
+
+ // FIXME: Error reporting absolutely sucks! If this fails to parse a type
+ // there will be cryptic errors about mismatched parentheses and missing
+ // specifiers.
+ TypeResult Ty = ParseTypeName();
+
+ SourceLocation RParen = MatchRHSPunctuation(tok::r_paren, LParen);
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnUnaryTypeTrait(UTT, Loc, LParen, Ty.get(), RParen);
+}
+
+/// ParseCXXAmbiguousParenExpression - We have parsed the left paren of a
+/// parenthesized ambiguous type-id. This uses tentative parsing to disambiguate
+/// based on the context past the parens.
+Parser::OwningExprResult
+Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
+ TypeTy *&CastTy,
+ SourceLocation LParenLoc,
+ SourceLocation &RParenLoc) {
+ assert(getLang().CPlusPlus && "Should only be called for C++!");
+ assert(ExprType == CastExpr && "Compound literals are not ambiguous!");
+ assert(isTypeIdInParens() && "Not a type-id!");
+
+ OwningExprResult Result(Actions, true);
+ CastTy = 0;
+
+ // We need to disambiguate a very ugly part of the C++ syntax:
+ //
+ // (T())x; - type-id
+ // (T())*x; - type-id
+ // (T())/x; - expression
+ // (T()); - expression
+ //
+ // The bad news is that we cannot use the specialized tentative parser, since
+ // it can only verify that the thing inside the parens can be parsed as a
+ // type-id; it is not useful for determining the context past the parens.
+ //
+ // The good news is that the parser can disambiguate this part without
+ // making any unnecessary Action calls.
+ //
+ // It uses a scheme similar to parsing inline methods. The parenthesized
+ // tokens are cached, the context that follows is determined (possibly by
+ // parsing a cast-expression), and then we re-introduce the cached tokens
+ // into the token stream and parse them appropriately.
+
+ ParenParseOption ParseAs;
+ CachedTokens Toks;
+
+ // Store the tokens of the parentheses. We will parse them after we determine
+ // the context that follows them.
+ if (!ConsumeAndStoreUntil(tok::r_paren, tok::unknown, Toks, tok::semi)) {
+ // We didn't find the ')' we expected.
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ return ExprError();
+ }
+
+ if (Tok.is(tok::l_brace)) {
+ ParseAs = CompoundLiteral;
+ } else {
+ bool NotCastExpr;
+ // FIXME: Special-case ++ and --: "(S())++;" is not a cast-expression
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::r_paren)) {
+ NotCastExpr = true;
+ } else {
+ // Try parsing the cast-expression that may follow.
+ // If it is not a cast-expression, NotCastExpr will be true and no token
+ // will be consumed.
+ Result = ParseCastExpression(false/*isUnaryExpression*/,
+ false/*isAddressofOperand*/,
+ NotCastExpr);
+ }
+
+ // If we parsed a cast-expression, it's really a type-id, otherwise it's
+ // an expression.
+ ParseAs = NotCastExpr ? SimpleExpr : CastExpr;
+ }
+
+ // The current token should go after the cached tokens.
+ Toks.push_back(Tok);
+ // Re-enter the stored parenthesized tokens into the token stream, so we may
+ // parse them now.
+ PP.EnterTokenStream(Toks.data(), Toks.size(),
+ true/*DisableMacroExpansion*/, false/*OwnsTokens*/);
+ // Drop the current token and bring the first cached one. It's the same token
+ // as when we entered this function.
+ ConsumeAnyToken();
+
+ if (ParseAs >= CompoundLiteral) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ if (ParseAs == CompoundLiteral) {
+ ExprType = CompoundLiteral;
+ return ParseCompoundLiteralExpression(Ty.get(), LParenLoc, RParenLoc);
+ }
+
+ // We parsed '(' type-id ')' and the thing after it wasn't a '{'.
+ assert(ParseAs == CastExpr);
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ CastTy = Ty.get();
+
+ // Result is what ParseCastExpression returned earlier.
+ if (!Result.isInvalid())
+ Result = Actions.ActOnCastExpr(LParenLoc, CastTy, RParenLoc,move(Result));
+ return move(Result);
+ }
+
+ // Not a compound literal, and not followed by a cast-expression.
+ assert(ParseAs == SimpleExpr);
+
+ ExprType = SimpleExpr;
+ Result = ParseExpression();
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnParenExpr(LParenLoc, Tok.getLocation(), move(Result));
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ return move(Result);
+}
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
new file mode 100644
index 0000000..bbc2124
--- /dev/null
+++ b/lib/Parse/ParseInit.cpp
@@ -0,0 +1,308 @@
+//===--- ParseInit.cpp - Initializer Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements initializer parsing as specified by C99 6.7.8.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Designator.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+
+/// MayBeDesignationStart - Return true if this token might be the start of a
+/// designator. If we can tell it is impossible that it is a designator, return
+/// false.
+static bool MayBeDesignationStart(tok::TokenKind K, Preprocessor &PP) {
+ switch (K) {
+ default: return false;
+ case tok::period: // designator: '.' identifier
+ case tok::l_square: // designator: array-designator
+ return true;
+ case tok::identifier: // designation: identifier ':'
+ return PP.LookAhead(0).is(tok::colon);
+ }
+}
+
+/// ParseInitializerWithPotentialDesignator - Parse the 'initializer' production
+/// checking to see if the token stream starts with a designator.
+///
+/// designation:
+/// designator-list '='
+/// [GNU] array-designator
+/// [GNU] identifier ':'
+///
+/// designator-list:
+/// designator
+/// designator-list designator
+///
+/// designator:
+/// array-designator
+/// '.' identifier
+///
+/// array-designator:
+/// '[' constant-expression ']'
+/// [GNU] '[' constant-expression '...' constant-expression ']'
+///
+/// NOTE: [OBC] allows '[ objc-receiver objc-message-args ]' as an
+/// initializer (because it is an expression). We need to consider this case
+/// when parsing array designators.
+///
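+/// For example, ".x = 1", "[2] = 3", the GNU range form "[0 ... 9] = 0", and
+/// the old-style GNU form "x: 1" are all recognized here.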
+Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() {
+
+ // If this is the old-style GNU extension:
+ // designation ::= identifier ':'
+ // Handle it as a field designator. Otherwise, this must be the start of a
+ // normal expression.
+ if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *FieldName = Tok.getIdentifierInfo();
+
+ std::string NewSyntax(".");
+ NewSyntax += FieldName->getName();
+ NewSyntax += " = ";
+
+ SourceLocation NameLoc = ConsumeToken(); // Eat the identifier.
+
+ assert(Tok.is(tok::colon) && "MayBeDesignationStart not working properly!");
+ SourceLocation ColonLoc = ConsumeToken();
+
+ Diag(Tok, diag::ext_gnu_old_style_field_designator)
+ << CodeModificationHint::CreateReplacement(SourceRange(NameLoc,
+ ColonLoc),
+ NewSyntax);
+
+ Designation D;
+ D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
+ ParseInitializer());
+ }
+
+ // Desig - This is initialized when we see our first designator. We may have
+ // an objc message send with no designator, so we don't want to create this
+ // eagerly.
+ Designation Desig;
+
+ // Parse each designator in the designator list until we find an initializer.
+ while (Tok.is(tok::period) || Tok.is(tok::l_square)) {
+ if (Tok.is(tok::period)) {
+ // designator: '.' identifier
+ SourceLocation DotLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected_field_designator);
+ return ExprError();
+ }
+
+ Desig.AddDesignator(Designator::getField(Tok.getIdentifierInfo(), DotLoc,
+ Tok.getLocation()));
+ ConsumeToken(); // Eat the identifier.
+ continue;
+ }
+
+ // We must have either an array designator now or an objc message send.
+ assert(Tok.is(tok::l_square) && "Unexpected token!");
+
+ // Handle the two forms of array designator:
+ // array-designator: '[' constant-expression ']'
+ // array-designator: '[' constant-expression '...' constant-expression ']'
+ //
+ // Also, we have to handle the case where the expression after the
+ // designator is an objc message send: '[' objc-message-expr ']'.
+ // Interesting cases are:
+ // [foo bar] -> objc message send
+ // [foo] -> array designator
+ // [foo ... bar] -> array designator
+ // [4][foo bar] -> obsolete GNU designation with objc message send.
+ //
+ SourceLocation StartLoc = ConsumeBracket();
+
+ // If Objective-C is enabled and this is a typename or other identifier
+ // receiver, parse this as a message send expression.
+ if (getLang().ObjC1 && isTokObjCMessageIdentifierReceiver()) {
+ // If we have exactly one array designator, this used the GNU
+ // 'designation: array-designator' extension; otherwise there should be no
+ // designators at all!
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator()))
+ Diag(StartLoc, diag::ext_gnu_missing_equal_designator);
+ else if (Desig.getNumDesignators() > 0)
+ Diag(Tok, diag::err_expected_equal_designator);
+
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = ConsumeToken();
+ return ParseAssignmentExprWithObjCMessageExprStart(
+ StartLoc, NameLoc, Name, ExprArg(Actions));
+ }
+
+ // Note that we parse this as an assignment expression, not a constant
+ // expression (allowing *=, =, etc) to handle the objc case. Sema needs
+ // to validate that the expression is a constant.
+ OwningExprResult Idx(ParseAssignmentExpression());
+ if (Idx.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(Idx);
+ }
+
+ // Given an expression, we could either have a designator (if the next
+ // tokens are '...' or ']' or an objc message send. If this is an objc
+ // message send, handle it now. An objc-message send is the start of
+ // an assignment-expression production.
+ if (getLang().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ Tok.isNot(tok::r_square)) {
+
+ // If we have exactly one array designator, this used the GNU
+ // 'designation: array-designator' extension; otherwise there should be no
+ // designators at all!
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator()))
+ Diag(StartLoc, diag::ext_gnu_missing_equal_designator);
+ else if (Desig.getNumDesignators() > 0)
+ Diag(Tok, diag::err_expected_equal_designator);
+
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ 0, move(Idx));
+ }
+
+ // If this is a normal array designator, remember it.
+ if (Tok.isNot(tok::ellipsis)) {
+ Desig.AddDesignator(Designator::getArray(Idx.release(), StartLoc));
+ } else {
+ // Handle the gnu array range extension.
+ Diag(Tok, diag::ext_gnu_array_range);
+ SourceLocation EllipsisLoc = ConsumeToken();
+
+ OwningExprResult RHS(ParseConstantExpression());
+ if (RHS.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(RHS);
+ }
+ Desig.AddDesignator(Designator::getArrayRange(Idx.release(),
+ RHS.release(),
+ StartLoc, EllipsisLoc));
+ }
+
+ SourceLocation EndLoc = MatchRHSPunctuation(tok::r_square, StartLoc);
+ Desig.getDesignator(Desig.getNumDesignators() - 1).setRBracketLoc(EndLoc);
+ }
+
+ // Okay, we're done with the designator sequence. We know that there must be
+ // at least one designator, because the only case we can get into this method
+ // without a designator is when we have an objc message send. That case is
+ // handled and returned from above.
+ assert(!Desig.empty() && "Designator is empty?");
+
+ // Handle a normal designator sequence end, which is an equal.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+ return Actions.ActOnDesignatedInitializer(Desig, EqualLoc, false,
+ ParseInitializer());
+ }
+
+ // We read some number of designators and found something that isn't an = or
+ // an initializer. If we have exactly one array designator, this
+ // is the GNU 'designation: array-designator' extension. Otherwise, it is a
+ // parse error.
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator())) {
+ Diag(Tok, diag::ext_gnu_missing_equal_designator)
+ << CodeModificationHint::CreateInsertion(Tok.getLocation(), "= ");
+ return Actions.ActOnDesignatedInitializer(Desig, Tok.getLocation(),
+ true, ParseInitializer());
+ }
+
+ Diag(Tok, diag::err_expected_equal_designator);
+ return ExprError();
+}
+
+
+/// ParseBraceInitializer - Called when parsing an initializer that has a
+/// leading open brace.
+///
+/// initializer: [C99 6.7.8]
+/// '{' initializer-list '}'
+/// '{' initializer-list ',' '}'
+/// [GNU] '{' '}'
+///
+/// initializer-list:
+/// designation[opt] initializer
+/// initializer-list ',' designation[opt] initializer
+///
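+/// For example, "{ 1, 2, .x = 3, }" and the GNU empty initializer "{ }" are
+/// both parsed here.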
+Parser::OwningExprResult Parser::ParseBraceInitializer() {
+ SourceLocation LBraceLoc = ConsumeBrace();
+
+ /// InitExprs - This is the actual list of expressions contained in the
+ /// initializer.
+ ExprVector InitExprs(Actions);
+
+ if (Tok.is(tok::r_brace)) {
+ // Empty initializers are a C++ feature and a GNU extension to C.
+ if (!getLang().CPlusPlus)
+ Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
+ // Match the '}'.
+ return Actions.ActOnInitList(LBraceLoc, Action::MultiExprArg(Actions),
+ ConsumeBrace());
+ }
+
+ bool InitExprsOk = true;
+
+ while (1) {
+ // Parse: designation[opt] initializer
+
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ OwningExprResult SubElt(Actions);
+ if (MayBeDesignationStart(Tok.getKind(), PP))
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ // If we couldn't parse the subelement, bail out.
+ if (!SubElt.isInvalid()) {
+ InitExprs.push_back(SubElt.release());
+ } else {
+ InitExprsOk = false;
+
+ // We have two ways to try to recover from this error: if the code looks
+ // grammatically ok (i.e. we have a comma coming up) try to continue
+ // parsing the rest of the initializer. This allows us to emit
+ // diagnostics for later elements that we find. If we don't see a comma,
+ // assume there is a parse error, and just skip to recover.
+ // FIXME: This comment doesn't sound right. If there is an r_brace
+ // immediately, it can't be an error, since there is no other way of
+ // leaving this loop except through this if.
+ if (Tok.isNot(tok::comma)) {
+ SkipUntil(tok::r_brace, false, true);
+ break;
+ }
+ }
+
+ // If we don't have a comma continued list, we're done.
+ if (Tok.isNot(tok::comma)) break;
+
+ // TODO: save comma locations if some client cares.
+ ConsumeToken();
+
+ // Handle trailing comma.
+ if (Tok.is(tok::r_brace)) break;
+ }
+ if (InitExprsOk && Tok.is(tok::r_brace))
+ return Actions.ActOnInitList(LBraceLoc, move_arg(InitExprs),
+ ConsumeBrace());
+
+ // Match the '}'.
+ MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+ return ExprError(); // an error occurred.
+}
+
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
new file mode 100644
index 0000000..3014f95
--- /dev/null
+++ b/lib/Parse/ParseObjc.cpp
@@ -0,0 +1,1708 @@
+//===--- ParseObjC.cpp - Objective C Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C portions of the Parser interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+
+/// ParseObjCAtDirectives - Handle parts of the external-declaration production:
+/// external-declaration: [C99 6.9]
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] '@' 'end'
+Parser::DeclPtrTy Parser::ParseObjCAtDirectives() {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_class:
+ return ParseObjCAtClassDeclaration(AtLoc);
+ case tok::objc_interface:
+ return ParseObjCAtInterfaceDeclaration(AtLoc);
+ case tok::objc_protocol:
+ return ParseObjCAtProtocolDeclaration(AtLoc);
+ case tok::objc_implementation:
+ return ParseObjCAtImplementationDeclaration(AtLoc);
+ case tok::objc_end:
+ return ParseObjCAtEndDeclaration(AtLoc);
+ case tok::objc_compatibility_alias:
+ return ParseObjCAtAliasDeclaration(AtLoc);
+ case tok::objc_synthesize:
+ return ParseObjCPropertySynthesize(AtLoc);
+ case tok::objc_dynamic:
+ return ParseObjCPropertyDynamic(AtLoc);
+ default:
+ Diag(AtLoc, diag::err_unexpected_at);
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+}
+
+///
+/// objc-class-declaration:
+/// '@' 'class' identifier-list ';'
+///
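+/// For example: "@class NSObject, NSString;"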
+Parser::DeclPtrTy Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
+ ConsumeToken(); // the identifier "class"
+ llvm::SmallVector<IdentifierInfo *, 8> ClassNames;
+
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+ ClassNames.push_back(Tok.getIdentifierInfo());
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@class"))
+ return DeclPtrTy();
+
+ return Actions.ActOnForwardClassDeclaration(atLoc,
+ &ClassNames[0], ClassNames.size());
+}
+
+///
+/// objc-interface:
+/// objc-class-interface-attributes[opt] objc-class-interface
+/// objc-category-interface
+///
+/// objc-class-interface:
+/// '@' 'interface' identifier objc-superclass[opt]
+/// objc-protocol-refs[opt]
+/// objc-class-instance-variables[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-category-interface:
+/// '@' 'interface' identifier '(' identifier[opt] ')'
+/// objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-superclass:
+/// ':' identifier
+///
+/// objc-class-interface-attributes:
+/// __attribute__((visibility("default")))
+/// __attribute__((visibility("hidden")))
+/// __attribute__((deprecated))
+/// __attribute__((unavailable))
+/// __attribute__((objc_exception)) - used by NSException on 64-bit
+///
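+/// For example, "@interface Foo : Bar <Baz>" begins a class interface with a
+/// superclass and a protocol reference, while "@interface Foo (Cat)" begins a
+/// category interface.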
+Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
+ SourceLocation atLoc, AttributeList *attrList) {
+ assert(Tok.isObjCAtKeyword(tok::objc_interface) &&
+ "ParseObjCAtInterfaceDeclaration(): Expected @interface");
+ ConsumeToken(); // the "interface" identifier
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing class or category name.
+ return DeclPtrTy();
+ }
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+
+ if (Tok.is(tok::l_paren)) { // we have a category.
+ SourceLocation lparenLoc = ConsumeParen();
+ SourceLocation categoryLoc, rparenLoc;
+ IdentifierInfo *categoryId = 0;
+
+ // For ObjC2, the category name is optional (not an error).
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ } else if (!getLang().ObjC2) {
+ Diag(Tok, diag::err_expected_ident); // missing category name.
+ return DeclPtrTy();
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ SkipUntil(tok::r_paren, false); // don't stop at ';'
+ return DeclPtrTy();
+ }
+ rparenLoc = ConsumeParen();
+
+ // Next, we need to check for any protocol references.
+ SourceLocation EndProtoLoc;
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolRefs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, true, EndProtoLoc))
+ return DeclPtrTy();
+
+ if (attrList) // categories don't support attributes.
+ Diag(Tok, diag::err_objc_no_attributes_on_category);
+
+ DeclPtrTy CategoryType =
+ Actions.ActOnStartCategoryInterface(atLoc,
+ nameId, nameLoc,
+ categoryId, categoryLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ EndProtoLoc);
+
+ ParseObjCInterfaceDeclList(CategoryType, tok::objc_not_keyword);
+ return CategoryType;
+ }
+ // Parse a class interface.
+ IdentifierInfo *superClassId = 0;
+ SourceLocation superClassLoc;
+
+ if (Tok.is(tok::colon)) { // a super class is specified.
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing super class name.
+ return DeclPtrTy();
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken();
+ }
+ // Next, we need to check for any protocol references.
+ llvm::SmallVector<Action::DeclPtrTy, 8> ProtocolRefs;
+ SourceLocation EndProtoLoc;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, true, EndProtoLoc))
+ return DeclPtrTy();
+
+ DeclPtrTy ClsType =
+ Actions.ActOnStartClassInterface(atLoc, nameId, nameLoc,
+ superClassId, superClassLoc,
+ ProtocolRefs.data(), ProtocolRefs.size(),
+ EndProtoLoc, attrList);
+
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(ClsType, atLoc);
+
+ ParseObjCInterfaceDeclList(ClsType, tok::objc_interface);
+ return ClsType;
+}
+
+/// objc-interface-decl-list:
+/// empty
+/// objc-interface-decl-list objc-property-decl [OBJC2]
+/// objc-interface-decl-list objc-method-requirement [OBJC2]
+/// objc-interface-decl-list objc-method-proto ';'
+/// objc-interface-decl-list declaration
+/// objc-interface-decl-list ';'
+///
+/// objc-method-requirement: [OBJC2]
+/// @required
+/// @optional
+///
+void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl,
+ tok::ObjCKeywordKind contextKey) {
+ llvm::SmallVector<DeclPtrTy, 32> allMethods;
+ llvm::SmallVector<DeclPtrTy, 16> allProperties;
+ llvm::SmallVector<DeclGroupPtrTy, 8> allTUVariables;
+ tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword;
+
+ SourceLocation AtEndLoc;
+
+ while (1) {
+ // If this is a method prototype, parse it.
+ if (Tok.is(tok::minus) || Tok.is(tok::plus)) {
+ DeclPtrTy methodPrototype =
+ ParseObjCMethodPrototype(interfaceDecl, MethodImplKind);
+ allMethods.push_back(methodPrototype);
+ // Consume the ';' here, since ParseObjCMethodPrototype() is re-used for
+ // method definitions.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_method_proto,
+ "", tok::semi);
+ continue;
+ }
+
+ // Ignore excess semicolons.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ continue;
+ }
+
+ // If we got to the end of the file, exit the loop.
+ if (Tok.is(tok::eof))
+ break;
+
+ // If we don't have an @ directive, parse it as a function definition.
+ if (Tok.isNot(tok::at)) {
+ // The code below does not consume '}'s because it is afraid of eating the
+ // end of a namespace. Because of the way this code is structured, an
+ // erroneous r_brace would cause an infinite loop if not handled here.
+ if (Tok.is(tok::r_brace))
+ break;
+
+ // FIXME: as the name implies, this rule allows function definitions.
+ // We could pass a flag or check for functions during semantic analysis.
+ allTUVariables.push_back(ParseDeclarationOrFunctionDefinition());
+ continue;
+ }
+
+ // Otherwise, we have an @ directive, eat the @.
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
+
+ if (DirectiveKind == tok::objc_end) { // @end -> terminate list
+ AtEndLoc = AtLoc;
+ break;
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+
+ switch (DirectiveKind) {
+ default:
+ // FIXME: If someone forgets an @end on a protocol, this loop will
+ // continue to eat up tons of stuff and spew lots of nonsense errors. It
+ // would probably be better to bail out if we saw an @class or @interface
+ // or something like that.
+ Diag(AtLoc, diag::err_objc_illegal_interface_qual);
+ // Skip until we see an '@' or '}' or ';'.
+ SkipUntil(tok::r_brace, tok::at);
+ break;
+
+ case tok::objc_required:
+ case tok::objc_optional:
+ // This is only valid on protocols.
+ // FIXME: Should this check for ObjC2 being enabled?
+ if (contextKey != tok::objc_protocol)
+ Diag(AtLoc, diag::err_objc_directive_only_in_protocol);
+ else
+ MethodImplKind = DirectiveKind;
+ break;
+
+ case tok::objc_property:
+ if (!getLang().ObjC2)
+ Diag(AtLoc, diag::err_objc_propertoes_require_objc2);
+
+ ObjCDeclSpec OCDS;
+ // Parse property attribute list, if any.
+ if (Tok.is(tok::l_paren))
+ ParseObjCPropertyAttribute(OCDS);
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS;
+ llvm::SmallVector<FieldDeclarator, 8> FieldDeclarators;
+ ParseStructDeclaration(DS, FieldDeclarators);
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list, "",
+ tok::at);
+
+ // Convert them all to property declarations.
+ for (unsigned i = 0, e = FieldDeclarators.size(); i != e; ++i) {
+ FieldDeclarator &FD = FieldDeclarators[i];
+ if (FD.D.getIdentifier() == 0) {
+ Diag(AtLoc, diag::err_objc_property_requires_field_name)
+ << FD.D.getSourceRange();
+ continue;
+ }
+ if (FD.BitfieldSize) {
+ Diag(AtLoc, diag::err_objc_property_bitfield)
+ << FD.D.getSourceRange();
+ continue;
+ }
+
+ // Install the property declarator into interfaceDecl.
+ IdentifierInfo *SelName =
+ OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
+
+ Selector GetterSel =
+ PP.getSelectorTable().getNullarySelector(SelName);
+ IdentifierInfo *SetterName = OCDS.getSetterName();
+ Selector SetterSel;
+ if (SetterName)
+ SetterSel = PP.getSelectorTable().getSelector(1, &SetterName);
+ else
+ SetterSel = SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(),
+ FD.D.getIdentifier());
+ bool isOverridingProperty = false;
+ DeclPtrTy Property = Actions.ActOnProperty(CurScope, AtLoc, FD, OCDS,
+ GetterSel, SetterSel,
+ interfaceDecl,
+ &isOverridingProperty,
+ MethodImplKind);
+ if (!isOverridingProperty)
+ allProperties.push_back(Property);
+ }
+ break;
+ }
+ }
+
+ // We break out of the big loop in two cases: when we see @end or when we see
+ // EOF. In the former case, eat the @end. In the latter case, emit an error.
+ if (Tok.isObjCAtKeyword(tok::objc_end))
+ ConsumeToken(); // the "end" identifier
+ else
+ Diag(Tok, diag::err_objc_missing_end);
+
+ // Insert collected methods declarations into the @interface object.
+ // This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
+ Actions.ActOnAtEnd(AtEndLoc, interfaceDecl,
+ allMethods.data(), allMethods.size(),
+ allProperties.data(), allProperties.size(),
+ allTUVariables.data(), allTUVariables.size());
+}
+
+/// Parse property attribute declarations.
+///
+/// property-attr-decl: '(' property-attrlist ')'
+/// property-attrlist:
+/// property-attribute
+/// property-attrlist ',' property-attribute
+/// property-attribute:
+/// getter '=' identifier
+/// setter '=' identifier ':'
+/// readonly
+/// readwrite
+/// assign
+/// retain
+/// copy
+/// nonatomic
+///
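+/// For example: "@property (nonatomic, retain, getter=theName) id name;" has
+/// the property attribute list "(nonatomic, retain, getter=theName)".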
+void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
+ assert(Tok.getKind() == tok::l_paren);
+ SourceLocation LHSLoc = ConsumeParen(); // consume '('
+
+ while (1) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // If this is not an identifier at all, bail out early.
+ if (II == 0) {
+ MatchRHSPunctuation(tok::r_paren, LHSLoc);
+ return;
+ }
+
+ SourceLocation AttrName = ConsumeToken(); // consume last attribute name
+
+ if (II->isStr("readonly"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+ else if (II->isStr("assign"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ else if (II->isStr("readwrite"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
+ else if (II->isStr("retain"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ else if (II->isStr("copy"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
+ else if (II->isStr("nonatomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ else if (II->isStr("getter") || II->isStr("setter")) {
+ // getter/setter require extra treatment.
+ if (ExpectAndConsume(tok::equal, diag::err_objc_expected_equal, "",
+ tok::r_paren))
+ return;
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ if (II->getName()[0] == 's') {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
+ DS.setSetterName(Tok.getIdentifierInfo());
+ ConsumeToken(); // consume method name
+
+ if (ExpectAndConsume(tok::colon, diag::err_expected_colon, "",
+ tok::r_paren))
+ return;
+ } else {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
+ DS.setGetterName(Tok.getIdentifierInfo());
+ ConsumeToken(); // consume method name
+ }
+ } else {
+ Diag(AttrName, diag::err_objc_expected_property_attr) << II;
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ MatchRHSPunctuation(tok::r_paren, LHSLoc);
+}
+
+/// objc-method-proto:
+/// objc-instance-method objc-method-decl objc-method-attributes[opt]
+/// objc-class-method objc-method-decl objc-method-attributes[opt]
+///
+/// objc-instance-method: '-'
+/// objc-class-method: '+'
+///
+/// objc-method-attributes: [OBJC2]
+/// __attribute__((deprecated))
+///
+Parser::DeclPtrTy Parser::ParseObjCMethodPrototype(DeclPtrTy IDecl,
+ tok::ObjCKeywordKind MethodImplKind) {
+ assert((Tok.is(tok::minus) || Tok.is(tok::plus)) && "expected +/-");
+
+ tok::TokenKind methodType = Tok.getKind();
+ SourceLocation mLoc = ConsumeToken();
+
+ DeclPtrTy MDecl = ParseObjCMethodDecl(mLoc, methodType, IDecl,MethodImplKind);
+ // Since this rule is used for both method declarations and definitions,
+ // the caller is (optionally) responsible for consuming the ';'.
+ return MDecl;
+}
+
+/// objc-selector:
+/// identifier
+/// one of
+/// enum struct union if else while do for switch case default
+/// break continue return goto asm sizeof typeof __alignof
+/// unsigned long const short volatile signed restrict _Complex
+/// in out inout bycopy byref oneway int char float double void _Bool
+///
+IdentifierInfo *Parser::ParseObjCSelectorPiece(SourceLocation &SelectorLoc) {
+ switch (Tok.getKind()) {
+ default:
+ return 0;
+ case tok::identifier:
+ case tok::kw_asm:
+ case tok::kw_auto:
+ case tok::kw_bool:
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_char:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_const_cast:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_delete:
+ case tok::kw_do:
+ case tok::kw_double:
+ case tok::kw_dynamic_cast:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_explicit:
+ case tok::kw_export:
+ case tok::kw_extern:
+ case tok::kw_false:
+ case tok::kw_float:
+ case tok::kw_for:
+ case tok::kw_friend:
+ case tok::kw_goto:
+ case tok::kw_if:
+ case tok::kw_inline:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_mutable:
+ case tok::kw_namespace:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_register:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_restrict:
+ case tok::kw_return:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_sizeof:
+ case tok::kw_static:
+ case tok::kw_static_cast:
+ case tok::kw_struct:
+ case tok::kw_switch:
+ case tok::kw_template:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typedef:
+ case tok::kw_typeid:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_using:
+ case tok::kw_virtual:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw_wchar_t:
+ case tok::kw_while:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw___alignof:
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SelectorLoc = ConsumeToken();
+ return II;
+ }
+}
+
+/// objc-for-collection-in: 'in'
+///
+bool Parser::isTokIdentifier_in() const {
+ // FIXME: May have to do additional look-ahead to only allow
+ // valid tokens following an 'in', such as an identifier, unary operators,
+ // '[', etc.
+ return (getLang().ObjC2 && Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
+}
+
+/// ParseObjCTypeQualifierList - This routine parses the objective-c's type
+/// qualifier list and builds their bitmask representation in the input
+/// argument.
+///
+/// objc-type-qualifiers:
+/// objc-type-qualifier
+/// objc-type-qualifiers objc-type-qualifier
+///
+void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS) {
+ while (1) {
+ if (Tok.isNot(tok::identifier))
+ return;
+
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ for (unsigned i = 0; i != objc_NumQuals; ++i) {
+ if (II != ObjCTypeQuals[i])
+ continue;
+
+ ObjCDeclSpec::ObjCDeclQualifier Qual;
+ switch (i) {
+ default: assert(0 && "Unknown decl qualifier");
+ case objc_in: Qual = ObjCDeclSpec::DQ_In; break;
+ case objc_out: Qual = ObjCDeclSpec::DQ_Out; break;
+ case objc_inout: Qual = ObjCDeclSpec::DQ_Inout; break;
+ case objc_oneway: Qual = ObjCDeclSpec::DQ_Oneway; break;
+ case objc_bycopy: Qual = ObjCDeclSpec::DQ_Bycopy; break;
+ case objc_byref: Qual = ObjCDeclSpec::DQ_Byref; break;
+ }
+ DS.setObjCDeclQualifier(Qual);
+ ConsumeToken();
+ II = 0;
+ break;
+ }
+
+ // If this wasn't a recognized qualifier, bail out.
+ if (II) return;
+ }
+}
+
+/// objc-type-name:
+/// '(' objc-type-qualifiers[opt] type-name ')'
+/// '(' objc-type-qualifiers[opt] ')'
+///
+Parser::TypeTy *Parser::ParseObjCTypeName(ObjCDeclSpec &DS) {
+ assert(Tok.is(tok::l_paren) && "expected (");
+
+ SourceLocation LParenLoc = ConsumeParen();
+ SourceLocation TypeStartLoc = Tok.getLocation();
+
+ // Parse type qualifiers, in, inout, etc.
+ ParseObjCTypeQualifierList(DS);
+
+ TypeTy *Ty = 0;
+ if (isTypeSpecifierQualifier()) {
+ TypeResult TypeSpec = ParseTypeName();
+ if (!TypeSpec.isInvalid())
+ Ty = TypeSpec.get();
+ }
+
+ if (Tok.is(tok::r_paren))
+ ConsumeParen();
+ else if (Tok.getLocation() == TypeStartLoc) {
+ // If we didn't eat any tokens, then this isn't a type.
+ Diag(Tok, diag::err_expected_type);
+ SkipUntil(tok::r_paren);
+ } else {
+ // Otherwise, we found *something*, but didn't get a ')' in the right
+ // place. Emit an error then return what we have as the type.
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ }
+ return Ty;
+}
+
+/// objc-method-decl:
+/// objc-selector
+/// objc-keyword-selector objc-parmlist[opt]
+/// objc-type-name objc-selector
+/// objc-type-name objc-keyword-selector objc-parmlist[opt]
+///
+/// objc-keyword-selector:
+/// objc-keyword-decl
+/// objc-keyword-selector objc-keyword-decl
+///
+/// objc-keyword-decl:
+/// objc-selector ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// objc-selector ':' objc-keyword-attributes[opt] identifier
+/// ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// ':' objc-keyword-attributes[opt] identifier
+///
+/// objc-parmlist:
+/// objc-parms objc-ellipsis[opt]
+///
+/// objc-parms:
+/// objc-parms , parameter-declaration
+///
+/// objc-ellipsis:
+/// , ...
+///
+/// objc-keyword-attributes: [OBJC2]
+/// __attribute__((unused))
+///
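+/// For example, "- (void)setWidth:(int)w height:(int)h" declares an instance
+/// method whose selector has the keyword pieces "setWidth:" and "height:".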
+Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
+ tok::TokenKind mType,
+ DeclPtrTy IDecl,
+ tok::ObjCKeywordKind MethodImplKind) {
+ // Parse the return type if present.
+ TypeTy *ReturnType = 0;
+ ObjCDeclSpec DSRet;
+ if (Tok.is(tok::l_paren))
+ ReturnType = ParseObjCTypeName(DSRet);
+
+ SourceLocation selLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(selLoc);
+
+ // An unnamed colon is valid.
+ if (!SelIdent && Tok.isNot(tok::colon)) { // missing selector name.
+ Diag(Tok, diag::err_expected_selector_for_method)
+ << SourceRange(mLoc, Tok.getLocation());
+ // Skip until we get a ; or {}.
+ SkipUntil(tok::r_brace);
+ return DeclPtrTy();
+ }
+
+ llvm::SmallVector<Declarator, 8> CargNames;
+ if (Tok.isNot(tok::colon)) {
+ // If attributes exist after the method, parse them.
+ AttributeList *MethodAttrs = 0;
+ if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
+ MethodAttrs = ParseAttributes();
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
+ return Actions.ActOnMethodDeclaration(mLoc, Tok.getLocation(),
+ mType, IDecl, DSRet, ReturnType, Sel,
+ 0, CargNames, MethodAttrs,
+ MethodImplKind);
+ }
+
+ llvm::SmallVector<IdentifierInfo *, 12> KeyIdents;
+ llvm::SmallVector<Action::ObjCArgInfo, 12> ArgInfos;
+
+ while (1) {
+ Action::ObjCArgInfo ArgInfo;
+
+ // Each iteration parses a single keyword argument.
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon);
+ break;
+ }
+ ConsumeToken(); // Eat the ':'.
+
+ ArgInfo.Type = 0;
+ if (Tok.is(tok::l_paren)) // Parse the argument type if present.
+ ArgInfo.Type = ParseObjCTypeName(ArgInfo.DeclSpec);
+
+ // If attributes exist before the argument name, parse them.
+ ArgInfo.ArgAttrs = 0;
+ if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
+ ArgInfo.ArgAttrs = ParseAttributes();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing argument name.
+ break;
+ }
+
+ ArgInfo.Name = Tok.getIdentifierInfo();
+ ArgInfo.NameLoc = Tok.getLocation();
+ ConsumeToken(); // Eat the identifier.
+
+ ArgInfos.push_back(ArgInfo);
+ KeyIdents.push_back(SelIdent);
+
+ // Check for another keyword selector.
+ SourceLocation Loc;
+ SelIdent = ParseObjCSelectorPiece(Loc);
+ if (!SelIdent && Tok.isNot(tok::colon))
+ break;
+ // We have a selector or a colon, continue parsing.
+ }
+
+ bool isVariadic = false;
+
+ // Parse the (optional) parameter list.
+ while (Tok.is(tok::comma)) {
+ ConsumeToken();
+ if (Tok.is(tok::ellipsis)) {
+ isVariadic = true;
+ ConsumeToken();
+ break;
+ }
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS);
+ // Parse the declarator.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+ CargNames.push_back(ParmDecl);
+ }
+
+ // FIXME: Add support for optional parameter list...
+ // If attributes exist after the method, parse them.
+ AttributeList *MethodAttrs = 0;
+ if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
+ MethodAttrs = ParseAttributes();
+
+ Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
+ &KeyIdents[0]);
+ return Actions.ActOnMethodDeclaration(mLoc, Tok.getLocation(),
+ mType, IDecl, DSRet, ReturnType, Sel,
+ &ArgInfos[0], CargNames, MethodAttrs,
+ MethodImplKind, isVariadic);
+}
+
+/// objc-protocol-refs:
+/// '<' identifier-list '>'
+///
+bool Parser::
+ParseObjCProtocolReferences(llvm::SmallVectorImpl<Action::DeclPtrTy> &Protocols,
+ bool WarnOnDeclarations, SourceLocation &EndLoc) {
+ assert(Tok.is(tok::less) && "expected <");
+
+ ConsumeToken(); // the "<"
+
+ llvm::SmallVector<IdentifierLocPair, 8> ProtocolIdents;
+
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::greater);
+ return true;
+ }
+ ProtocolIdents.push_back(std::make_pair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ // Consume the '>'.
+ if (Tok.isNot(tok::greater)) {
+ Diag(Tok, diag::err_expected_greater);
+ return true;
+ }
+
+ EndLoc = ConsumeAnyToken();
+
+ // Convert the list of protocols identifiers into a list of protocol decls.
+ Actions.FindProtocolDeclaration(WarnOnDeclarations,
+ &ProtocolIdents[0], ProtocolIdents.size(),
+ Protocols);
+ return false;
+}
+
+/// objc-class-instance-variables:
+/// '{' objc-instance-variable-decl-list[opt] '}'
+///
+/// objc-instance-variable-decl-list:
+/// objc-visibility-spec
+/// objc-instance-variable-decl ';'
+/// ';'
+/// objc-instance-variable-decl-list objc-visibility-spec
+/// objc-instance-variable-decl-list objc-instance-variable-decl ';'
+/// objc-instance-variable-decl-list ';'
+///
+/// objc-visibility-spec:
+/// @private
+/// @protected
+/// @public
+/// @package [OBJC2]
+///
+/// objc-instance-variable-decl:
+/// struct-declaration
+///
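+/// For example: "{ @private int count; @public id delegate; }"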
+void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
+ SourceLocation atLoc) {
+ assert(Tok.is(tok::l_brace) && "expected {");
+ llvm::SmallVector<DeclPtrTy, 32> AllIvarDecls;
+ llvm::SmallVector<FieldDeclarator, 8> FieldDeclarators;
+
+ ParseScope ClassScope(this, Scope::DeclScope|Scope::ClassScope);
+
+ SourceLocation LBraceLoc = ConsumeBrace(); // the "{"
+
+ tok::ObjCKeywordKind visibility = tok::objc_protected;
+ // While we still have something to read, read the instance variables.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one objc-instance-variable-decl.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi);
+ ConsumeToken();
+ continue;
+ }
+
+ // Set the default visibility to private.
+ if (Tok.is(tok::at)) { // parse objc-visibility-spec
+ ConsumeToken(); // eat the @ sign
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_private:
+ case tok::objc_public:
+ case tok::objc_protected:
+ case tok::objc_package:
+ visibility = Tok.getObjCKeywordID();
+ ConsumeToken();
+ continue;
+ default:
+ Diag(Tok, diag::err_objc_illegal_visibility_spec);
+ continue;
+ }
+ }
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS;
+ FieldDeclarators.clear();
+ ParseStructDeclaration(DS, FieldDeclarators);
+
+ // Convert them all to fields.
+ for (unsigned i = 0, e = FieldDeclarators.size(); i != e; ++i) {
+ FieldDeclarator &FD = FieldDeclarators[i];
+ // Install the declarator into interfaceDecl.
+ DeclPtrTy Field = Actions.ActOnIvar(CurScope,
+ DS.getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize, visibility);
+ AllIvarDecls.push_back(Field);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, true, true);
+ }
+ }
+ SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc);
+ // Call ActOnFields() even if we don't have any decls. This is useful
+ // for code rewriting tools that need to be aware of the empty list.
+ Actions.ActOnFields(CurScope, atLoc, interfaceDecl,
+ AllIvarDecls.data(), AllIvarDecls.size(),
+ LBraceLoc, RBraceLoc, 0);
+ return;
+}
+
+/// objc-protocol-declaration:
+/// objc-protocol-definition
+/// objc-protocol-forward-reference
+///
+/// objc-protocol-definition:
+/// @protocol identifier
+/// objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-protocol-forward-reference:
+/// @protocol identifier-list ';'
+///
+/// "@protocol identifier ;" should be resolved as "@protocol
+/// identifier-list ;": objc-interface-decl-list may not start with a
+/// semicolon in the first alternative if objc-protocol-refs are omitted.
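+///
+/// For example, "@protocol Foo;" and "@protocol Foo, Bar;" are forward
+/// references, while "@protocol Foo <Bar> ... @end" is a definition.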
+Parser::DeclPtrTy Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
+ AttributeList *attrList) {
+ assert(Tok.isObjCAtKeyword(tok::objc_protocol) &&
+ "ParseObjCAtProtocolDeclaration(): Expected @protocol");
+ ConsumeToken(); // the "protocol" identifier
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing protocol name.
+ return DeclPtrTy();
+ }
+ // Save the protocol name, then consume it.
+ IdentifierInfo *protocolName = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+
+ if (Tok.is(tok::semi)) { // forward declaration of one protocol.
+ IdentifierLocPair ProtoInfo(protocolName, nameLoc);
+ ConsumeToken();
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, &ProtoInfo, 1,
+ attrList);
+ }
+
+ if (Tok.is(tok::comma)) { // list of forward declarations.
+ llvm::SmallVector<IdentifierLocPair, 8> ProtocolRefs;
+ ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
+
+ // Parse the list of forward declarations.
+ while (1) {
+ ConsumeToken(); // the ','
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+ }
+ ProtocolRefs.push_back(IdentifierLocPair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ConsumeToken(); // the identifier
+
+ if (Tok.isNot(tok::comma))
+ break;
+ }
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@protocol"))
+ return DeclPtrTy();
+
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc,
+ &ProtocolRefs[0],
+ ProtocolRefs.size(),
+ attrList);
+ }
+
+ // Last, and definitely not least, parse a protocol declaration.
+ SourceLocation EndProtoLoc;
+
+ llvm::SmallVector<DeclPtrTy, 8> ProtocolRefs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, false, EndProtoLoc))
+ return DeclPtrTy();
+
+ DeclPtrTy ProtoType =
+ Actions.ActOnStartProtocolInterface(AtLoc, protocolName, nameLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ EndProtoLoc, attrList);
+ ParseObjCInterfaceDeclList(ProtoType, tok::objc_protocol);
+ return ProtoType;
+}
+
+/// objc-implementation:
+/// objc-class-implementation-prologue
+/// objc-category-implementation-prologue
+///
+/// objc-class-implementation-prologue:
+/// @implementation identifier objc-superclass[opt]
+/// objc-class-instance-variables[opt]
+///
+/// objc-category-implementation-prologue:
+/// @implementation identifier ( identifier )
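+///
+/// Illustrative examples (editor's sketch, not from the original source):
+///   @implementation MyClass : MySuperClass   // class implementation prologue
+///   { id _someIvar; }                        // optional instance variables
+///   @implementation MyClass (MyCategory)     // category implementation prologue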
+Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration(
+ SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_implementation) &&
+ "ParseObjCAtImplementationDeclaration(): Expected @implementation");
+ ConsumeToken(); // the "implementation" identifier
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing class or category name.
+ return DeclPtrTy();
+ }
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken(); // consume class or category name
+
+ if (Tok.is(tok::l_paren)) {
+ // we have a category implementation.
+ SourceLocation lparenLoc = ConsumeParen();
+ SourceLocation categoryLoc, rparenLoc;
+ IdentifierInfo *categoryId = 0;
+
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_ident); // missing category name.
+ return DeclPtrTy();
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ SkipUntil(tok::r_paren, false); // don't stop at ';'
+ return DeclPtrTy();
+ }
+ rparenLoc = ConsumeParen();
+ DeclPtrTy ImplCatType = Actions.ActOnStartCategoryImplementation(
+ atLoc, nameId, nameLoc, categoryId,
+ categoryLoc);
+ ObjCImpDecl = ImplCatType;
+ return DeclPtrTy();
+ }
+ // We have a class implementation
+ SourceLocation superClassLoc;
+ IdentifierInfo *superClassId = 0;
+ if (Tok.is(tok::colon)) {
+ // We have a super class
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing super class name.
+ return DeclPtrTy();
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken(); // Consume super class name
+ }
+ DeclPtrTy ImplClsType = Actions.ActOnStartClassImplementation(
+ atLoc, nameId, nameLoc,
+ superClassId, superClassLoc);
+
+ if (Tok.is(tok::l_brace)) // we have ivars
+ ParseObjCClassInstanceVariables(ImplClsType/*FIXME*/, atLoc);
+ ObjCImpDecl = ImplClsType;
+
+ return DeclPtrTy();
+}
+
+Parser::DeclPtrTy Parser::ParseObjCAtEndDeclaration(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_end) &&
+ "ParseObjCAtEndDeclaration(): Expected @end");
+ DeclPtrTy Result = ObjCImpDecl;
+ ConsumeToken(); // the "end" identifier
+ if (ObjCImpDecl) {
+ Actions.ActOnAtEnd(atLoc, ObjCImpDecl);
+ ObjCImpDecl = DeclPtrTy();
+ }
+ else
+ Diag(atLoc, diag::warn_expected_implementation); // missing @implementation
+ return Result;
+}
+
+/// compatibility-alias-decl:
+/// @compatibility_alias alias-name class-name ';'
+///
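+/// Illustrative example (editor's sketch, not from the original source):
+///   @compatibility_alias OldClassName MyNewClass;
+///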
+Parser::DeclPtrTy Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_compatibility_alias) &&
+ "ParseObjCAtAliasDeclaration(): Expected @compatibility_alias");
+ ConsumeToken(); // consume compatibility_alias
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+ IdentifierInfo *aliasId = Tok.getIdentifierInfo();
+ SourceLocation aliasLoc = ConsumeToken(); // consume alias-name
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+ IdentifierInfo *classId = Tok.getIdentifierInfo();
+ SourceLocation classLoc = ConsumeToken(); // consume class-name;
+ if (Tok.isNot(tok::semi)) {
+ Diag(Tok, diag::err_expected_semi_after) << "@compatibility_alias";
+ return DeclPtrTy();
+ }
+ return Actions.ActOnCompatiblityAlias(atLoc, aliasId, aliasLoc,
+ classId, classLoc);
+}
+
+/// property-synthesis:
+/// @synthesize property-ivar-list ';'
+///
+/// property-ivar-list:
+/// property-ivar
+/// property-ivar-list ',' property-ivar
+///
+/// property-ivar:
+/// identifier
+/// identifier '=' identifier
+///
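+/// Illustrative example (editor's sketch, not from the original source):
+///   @synthesize firstName, lastName = _lastName;
+///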
+Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
+  assert(Tok.isObjCAtKeyword(tok::objc_synthesize) &&
+         "ParseObjCPropertySynthesize(): Expected '@synthesize'");
+ SourceLocation loc = ConsumeToken(); // consume synthesize
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *propertyIvar = 0;
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ if (Tok.is(tok::equal)) {
+ // property '=' ivar-name
+ ConsumeToken(); // consume '='
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ break;
+ }
+ propertyIvar = Tok.getIdentifierInfo();
+ ConsumeToken(); // consume ivar-name
+ }
+ Actions.ActOnPropertyImplDecl(atLoc, propertyLoc, true, ObjCImpDecl,
+ propertyId, propertyIvar);
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ if (Tok.isNot(tok::semi))
+ Diag(Tok, diag::err_expected_semi_after) << "@synthesize";
+ return DeclPtrTy();
+}
+
+/// property-dynamic:
+/// @dynamic property-list
+///
+/// property-list:
+/// identifier
+/// property-list ',' identifier
+///
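+/// Illustrative example (editor's sketch, not from the original source):
+///   @dynamic firstName, lastName;
+///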
+Parser::DeclPtrTy Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_dynamic) &&
+ "ParseObjCPropertyDynamic(): Expected '@dynamic'");
+ SourceLocation loc = ConsumeToken(); // consume dynamic
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ Actions.ActOnPropertyImplDecl(atLoc, propertyLoc, false, ObjCImpDecl,
+ propertyId, 0);
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ if (Tok.isNot(tok::semi))
+ Diag(Tok, diag::err_expected_semi_after) << "@dynamic";
+ return DeclPtrTy();
+}
+
+/// objc-throw-statement:
+/// throw expression[opt];
+///
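+/// Illustrative examples (editor's sketch, not from the original source):
+///   @throw someException;   // throw a specific exception object
+///   @throw;                 // rethrow the current exception (inside @catch)
+///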
+Parser::OwningStmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) {
+ OwningExprResult Res(Actions);
+ ConsumeToken(); // consume throw
+ if (Tok.isNot(tok::semi)) {
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+ }
+ ConsumeToken(); // consume ';'
+ return Actions.ActOnObjCAtThrowStmt(atLoc, move(Res), CurScope);
+}
+
+/// objc-synchronized-statement:
+/// @synchronized '(' expression ')' compound-statement
+///
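+/// Illustrative example (editor's sketch, not from the original source):
+///   @synchronized (lockObject) { /* critical section */ }
+///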
+Parser::OwningStmtResult
+Parser::ParseObjCSynchronizedStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume synchronized
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "@synchronized";
+ return StmtError();
+ }
+ ConsumeParen(); // '('
+ OwningExprResult Res(ParseExpression());
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+  if (Tok.isNot(tok::r_paren)) {
+    Diag(Tok, diag::err_expected_rparen);
+ return StmtError();
+ }
+ ConsumeParen(); // ')'
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope BodyScope(this, Scope::DeclScope);
+
+ OwningStmtResult SynchBody(ParseCompoundStatementBody());
+
+ BodyScope.Exit();
+ if (SynchBody.isInvalid())
+ SynchBody = Actions.ActOnNullStmt(Tok.getLocation());
+ return Actions.ActOnObjCAtSynchronizedStmt(atLoc, move(Res), move(SynchBody));
+}
+
+/// objc-try-catch-statement:
+/// @try compound-statement objc-catch-list[opt]
+/// @try compound-statement objc-catch-list[opt] @finally compound-statement
+///
+/// objc-catch-list:
+/// @catch ( parameter-declaration ) compound-statement
+/// objc-catch-list @catch ( catch-parameter-declaration ) compound-statement
+/// catch-parameter-declaration:
+/// parameter-declaration
+/// '...' [OBJC2]
+///
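+/// Illustrative example (editor's sketch, not from the original source):
+///   @try { [obj doRiskyThing]; }
+///   @catch (SomeException *e) { /* handle the exception */ }
+///   @catch (...) { /* OBJC2 catch-all */ }
+///   @finally { /* cleanup that always runs */ }
+///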
+Parser::OwningStmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
+ bool catch_or_finally_seen = false;
+
+ ConsumeToken(); // consume try
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+ OwningStmtResult CatchStmts(Actions);
+ OwningStmtResult FinallyStmt(Actions);
+ ParseScope TryScope(this, Scope::DeclScope);
+ OwningStmtResult TryBody(ParseCompoundStatementBody());
+ TryScope.Exit();
+ if (TryBody.isInvalid())
+ TryBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ while (Tok.is(tok::at)) {
+ // At this point, we need to lookahead to determine if this @ is the start
+ // of an @catch or @finally. We don't want to consume the @ token if this
+ // is an @try or @encode or something else.
+ Token AfterAt = GetLookAheadToken(1);
+ if (!AfterAt.isObjCAtKeyword(tok::objc_catch) &&
+ !AfterAt.isObjCAtKeyword(tok::objc_finally))
+ break;
+
+ SourceLocation AtCatchFinallyLoc = ConsumeToken();
+ if (Tok.isObjCAtKeyword(tok::objc_catch)) {
+ DeclPtrTy FirstPart;
+ ConsumeToken(); // consume catch
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ ParseScope CatchScope(this, Scope::DeclScope|Scope::AtCatchScope);
+ if (Tok.isNot(tok::ellipsis)) {
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS);
+ // For some odd reason, the name of the exception variable is
+ // optional. As a result, we need to use "PrototypeContext", because
+ // we must accept either 'declarator' or 'abstract-declarator' here.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+
+ // Inform the actions module about the parameter declarator, so it
+ // gets added to the current scope.
+ FirstPart = Actions.ActOnParamDeclarator(CurScope, ParmDecl);
+ } else
+ ConsumeToken(); // consume '...'
+
+ SourceLocation RParenLoc;
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else // Skip over garbage, until we get to ')'. Eat the ')'.
+ SkipUntil(tok::r_paren, true, false);
+
+ OwningStmtResult CatchBody(Actions, true);
+ if (Tok.is(tok::l_brace))
+ CatchBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected_lbrace);
+ if (CatchBody.isInvalid())
+ CatchBody = Actions.ActOnNullStmt(Tok.getLocation());
+ CatchStmts = Actions.ActOnObjCAtCatchStmt(AtCatchFinallyLoc,
+ RParenLoc, FirstPart, move(CatchBody),
+ move(CatchStmts));
+ } else {
+ Diag(AtCatchFinallyLoc, diag::err_expected_lparen_after)
+ << "@catch clause";
+ return StmtError();
+ }
+ catch_or_finally_seen = true;
+ } else {
+ assert(Tok.isObjCAtKeyword(tok::objc_finally) && "Lookahead confused?");
+ ConsumeToken(); // consume finally
+ ParseScope FinallyScope(this, Scope::DeclScope);
+
+ OwningStmtResult FinallyBody(Actions, true);
+ if (Tok.is(tok::l_brace))
+ FinallyBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected_lbrace);
+ if (FinallyBody.isInvalid())
+ FinallyBody = Actions.ActOnNullStmt(Tok.getLocation());
+ FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
+ move(FinallyBody));
+ catch_or_finally_seen = true;
+ break;
+ }
+ }
+ if (!catch_or_finally_seen) {
+ Diag(atLoc, diag::err_missing_catch_finally);
+ return StmtError();
+ }
+ return Actions.ActOnObjCAtTryStmt(atLoc, move(TryBody), move(CatchStmts),
+ move(FinallyStmt));
+}
+
+/// objc-method-def: objc-method-proto ';'[opt] '{' body '}'
+///
+Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() {
+ DeclPtrTy MDecl = ParseObjCMethodPrototype(ObjCImpDecl);
+
+ PrettyStackTraceActionsDecl CrashInfo(MDecl, Tok.getLocation(), Actions,
+ PP.getSourceManager(),
+ "parsing Objective-C method");
+
+ // parse optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+
+ // We should have an opening brace now.
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_method_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, true, true);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return DeclPtrTy();
+ }
+ SourceLocation BraceLoc = Tok.getLocation();
+
+ // Enter a scope for the method body.
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a method definition with the
+ // specified Declarator for the method.
+ Actions.ActOnStartOfObjCMethodDef(CurScope, MDecl);
+
+ OwningStmtResult FnBody(ParseCompoundStatementBody());
+
+  // If the function body could not be parsed, make a bogus compound stmt.
+ if (FnBody.isInvalid())
+ FnBody = Actions.ActOnCompoundStmt(BraceLoc, BraceLoc,
+ MultiStmtArg(Actions), false);
+
+ // TODO: Pass argument information.
+ Actions.ActOnFinishFunctionBody(MDecl, move(FnBody));
+
+ // Leave the function body scope.
+ BodyScope.Exit();
+
+ return MDecl;
+}
+
+Parser::OwningStmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
+ if (Tok.isObjCAtKeyword(tok::objc_try)) {
+ return ParseObjCTryStmt(AtLoc);
+ } else if (Tok.isObjCAtKeyword(tok::objc_throw))
+ return ParseObjCThrowStmt(AtLoc);
+ else if (Tok.isObjCAtKeyword(tok::objc_synchronized))
+ return ParseObjCSynchronizedStmt(AtLoc);
+ OwningExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
+ if (Res.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon. Not
+ // doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+ // Otherwise, eat the semicolon.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Actions.FullExpr(Res));
+}
+
+Parser::OwningExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
+ switch (Tok.getKind()) {
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ return ParsePostfixExpressionSuffix(ParseObjCStringLiteral(AtLoc));
+ default:
+ if (Tok.getIdentifierInfo() == 0)
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+
+ switch (Tok.getIdentifierInfo()->getObjCKeywordID()) {
+ case tok::objc_encode:
+ return ParsePostfixExpressionSuffix(ParseObjCEncodeExpression(AtLoc));
+ case tok::objc_protocol:
+ return ParsePostfixExpressionSuffix(ParseObjCProtocolExpression(AtLoc));
+ case tok::objc_selector:
+ return ParsePostfixExpressionSuffix(ParseObjCSelectorExpression(AtLoc));
+ default:
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+ }
+ }
+}
+
+/// objc-message-expr:
+/// '[' objc-receiver objc-message-args ']'
+///
+/// objc-receiver:
+/// expression
+/// class-name
+/// type-name
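+///
+/// Illustrative examples (editor's sketch, not from the original source):
+///   [myObject doSomething]             // expression receiver, simple selector
+///   [MyClass sharedInstance]           // class-name receiver
+///   [myObject setWidth:10 height:20]   // keyword-argument message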
+Parser::OwningExprResult Parser::ParseObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && "'[' expected");
+ SourceLocation LBracLoc = ConsumeBracket(); // consume '['
+
+ // Parse receiver
+ if (isTokObjCMessageIdentifierReceiver()) {
+ IdentifierInfo *ReceiverName = Tok.getIdentifierInfo();
+ if (ReceiverName != Ident_super || GetLookAheadToken(1).isNot(tok::period)) {
+ SourceLocation NameLoc = ConsumeToken();
+ return ParseObjCMessageExpressionBody(LBracLoc, NameLoc, ReceiverName,
+ ExprArg(Actions));
+ }
+ }
+
+ OwningExprResult Res(ParseExpression());
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ 0, move(Res));
+}
+
+/// ParseObjCMessageExpressionBody - Having parsed "'[' objc-receiver", parse
+/// the rest of a message expression.
+///
+/// objc-message-args:
+/// objc-selector
+/// objc-keywordarg-list
+///
+/// objc-keywordarg-list:
+/// objc-keywordarg
+/// objc-keywordarg-list objc-keywordarg
+///
+/// objc-keywordarg:
+/// selector-name[opt] ':' objc-keywordexpr
+///
+/// objc-keywordexpr:
+/// nonempty-expr-list
+///
+/// nonempty-expr-list:
+/// assignment-expression
+/// nonempty-expr-list , assignment-expression
+///
+Parser::OwningExprResult
+Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *ReceiverName,
+ ExprArg ReceiverExpr) {
+ // Parse objc-selector
+ SourceLocation Loc;
+ IdentifierInfo *selIdent = ParseObjCSelectorPiece(Loc);
+
+ SourceLocation SelectorLoc = Loc;
+
+ llvm::SmallVector<IdentifierInfo *, 12> KeyIdents;
+ ExprVector KeyExprs(Actions);
+
+ if (Tok.is(tok::colon)) {
+ while (1) {
+ // Each iteration parses a single keyword argument.
+ KeyIdents.push_back(selIdent);
+
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon);
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ ConsumeToken(); // Eat the ':'.
+ /// Parse the expression after ':'
+ OwningExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.release());
+
+ // Check for another keyword selector.
+ selIdent = ParseObjCSelectorPiece(Loc);
+ if (!selIdent && Tok.isNot(tok::colon))
+ break;
+ // We have a selector or a colon, continue parsing.
+ }
+ // Parse the, optional, argument list, comma separated.
+ while (Tok.is(tok::comma)) {
+ ConsumeToken(); // Eat the ','.
+ /// Parse the expression after ','
+ OwningExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.release());
+ }
+ } else if (!selIdent) {
+ Diag(Tok, diag::err_expected_ident); // missing selector name.
+
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ if (Tok.isNot(tok::r_square)) {
+ Diag(Tok, diag::err_expected_rsquare);
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ SourceLocation RBracLoc = ConsumeBracket(); // consume ']'
+
+ unsigned nKeys = KeyIdents.size();
+ if (nKeys == 0)
+ KeyIdents.push_back(selIdent);
+ Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]);
+
+ // We've just parsed a keyword message.
+ if (ReceiverName)
+ return Owned(Actions.ActOnClassMessage(CurScope, ReceiverName, Sel,
+ LBracLoc, NameLoc, SelectorLoc,
+ RBracLoc,
+ KeyExprs.take(), KeyExprs.size()));
+ return Owned(Actions.ActOnInstanceMessage(ReceiverExpr.release(), Sel,
+ LBracLoc, SelectorLoc, RBracLoc,
+ KeyExprs.take(), KeyExprs.size()));
+}
+
+Parser::OwningExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
+ OwningExprResult Res(ParseStringLiteralExpression());
+ if (Res.isInvalid()) return move(Res);
+
+ // @"foo" @"bar" is a valid concatenated string. Eat any subsequent string
+ // expressions. At this point, we know that the only valid thing that starts
+ // with '@' is an @"".
+ llvm::SmallVector<SourceLocation, 4> AtLocs;
+ ExprVector AtStrings(Actions);
+ AtLocs.push_back(AtLoc);
+ AtStrings.push_back(Res.release());
+
+ while (Tok.is(tok::at)) {
+ AtLocs.push_back(ConsumeToken()); // eat the @.
+
+ // Invalid unless there is a string literal.
+ if (!isTokenStringLiteral())
+ return ExprError(Diag(Tok, diag::err_objc_concat_string));
+
+ OwningExprResult Lit(ParseStringLiteralExpression());
+ if (Lit.isInvalid())
+ return move(Lit);
+
+ AtStrings.push_back(Lit.release());
+ }
+
+ return Owned(Actions.ParseObjCStringLiteral(&AtLocs[0], AtStrings.take(),
+ AtStrings.size()));
+}
+
+/// objc-encode-expression:
+/// @encode ( type-name )
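+///
+/// Illustrative example (editor's sketch, not from the original source):
+///   @encode(int *)   // yields a C string type encoding, e.g. "^i"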
+Parser::OwningExprResult
+Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_encode) && "Not an @encode expression!");
+
+ SourceLocation EncLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@encode");
+
+ SourceLocation LParenLoc = ConsumeParen();
+
+ TypeResult Ty = ParseTypeName();
+
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Owned(Actions.ParseObjCEncodeExpression(AtLoc, EncLoc, LParenLoc,
+ Ty.get(), RParenLoc));
+}
+
+/// objc-protocol-expression
+/// @protocol ( protocol-name )
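+///
+/// Illustrative example (editor's sketch, not from the original source):
+///   Protocol *p = @protocol(NSObject);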
+Parser::OwningExprResult
+Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
+ SourceLocation ProtoLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@protocol");
+
+ SourceLocation LParenLoc = ConsumeParen();
+
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ IdentifierInfo *protocolId = Tok.getIdentifierInfo();
+ ConsumeToken();
+
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ return Owned(Actions.ParseObjCProtocolExpression(protocolId, AtLoc, ProtoLoc,
+ LParenLoc, RParenLoc));
+}
+
+/// objc-selector-expression
+/// @selector '(' objc-keyword-selector ')'
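+///
+/// Illustrative examples (editor's sketch, not from the original source):
+///   @selector(description)         // selector with no arguments
+///   @selector(setWidth:height:)    // two-argument keyword selector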
+Parser::OwningExprResult
+Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
+ SourceLocation SelectorLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@selector");
+
+ llvm::SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SourceLocation LParenLoc = ConsumeParen();
+ SourceLocation sLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(sLoc);
+ if (!SelIdent && Tok.isNot(tok::colon)) // missing selector name.
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ KeyIdents.push_back(SelIdent);
+ unsigned nColons = 0;
+ if (Tok.isNot(tok::r_paren)) {
+ while (1) {
+ if (Tok.isNot(tok::colon))
+ return ExprError(Diag(Tok, diag::err_expected_colon));
+
+ nColons++;
+ ConsumeToken(); // Eat the ':'.
+ if (Tok.is(tok::r_paren))
+ break;
+ // Check for another keyword selector.
+ SourceLocation Loc;
+ SelIdent = ParseObjCSelectorPiece(Loc);
+ KeyIdents.push_back(SelIdent);
+ if (!SelIdent && Tok.isNot(tok::colon))
+ break;
+ }
+ }
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ Selector Sel = PP.getSelectorTable().getSelector(nColons, &KeyIdents[0]);
+ return Owned(Actions.ParseObjCSelectorExpression(Sel, AtLoc, SelectorLoc,
+ LParenLoc, RParenLoc));
+}
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
new file mode 100644
index 0000000..94695e4
--- /dev/null
+++ b/lib/Parse/ParsePragma.cpp
@@ -0,0 +1,182 @@
+//===--- ParsePragma.cpp - Language specific pragma parsing ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the language specific #pragma handlers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ParsePragma.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Action.h"
+#include "clang/Parse/Parser.h"
+using namespace clang;
+
+// #pragma pack(...) comes in the following delicious flavors:
+// pack '(' [integer] ')'
+// pack '(' 'show' ')'
+//   pack '(' ('push' | 'pop') [',' identifier] [',' integer] ')'
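+//
+// Illustrative examples (editor's sketch, not from the original source):
+//   #pragma pack(4)                  // set the current alignment to 4
+//   #pragma pack(show)               // report the current alignment
+//   #pragma pack(push, mylabel, 8)   // save state under a label, then set to 8
+//   #pragma pack(pop)                // restore the previously saved state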
+void PragmaPackHandler::HandlePragma(Preprocessor &PP, Token &PackTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation PackLoc = PackTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "pack";
+ return;
+ }
+
+ Action::PragmaPackKind Kind = Action::PPK_Default;
+ IdentifierInfo *Name = 0;
+ Action::OwningExprResult Alignment(Actions);
+ SourceLocation LParenLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+ } else if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("show")) {
+ Kind = Action::PPK_Show;
+ PP.Lex(Tok);
+ } else {
+ if (II->isStr("push")) {
+ Kind = Action::PPK_Push;
+ } else if (II->isStr("pop")) {
+ Kind = Action::PPK_Pop;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_invalid_action);
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::numeric_constant)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+ }
+ }
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "pack";
+ return;
+ }
+
+ SourceLocation RParenLoc = Tok.getLocation();
+ Actions.ActOnPragmaPack(Kind, Name, Alignment.release(), PackLoc,
+ LParenLoc, RParenLoc);
+}
+
+// #pragma unused(identifier)
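+//
+// Illustrative example (editor's sketch, not from the original source):
+//   #pragma unused(x, y)   // suppress unused-variable warnings for x and y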
+void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation UnusedLoc = UnusedTok.getLocation();
+
+ // Lex the left '('.
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "unused";
+ return;
+ }
+ SourceLocation LParenLoc = Tok.getLocation();
+
+ // Lex the declaration reference(s).
+ llvm::SmallVector<Action::ExprTy*, 5> Ex;
+ SourceLocation RParenLoc;
+ bool LexID = true;
+
+ while (true) {
+ PP.Lex(Tok);
+
+ if (LexID) {
+ if (Tok.is(tok::identifier)) {
+ Action::OwningExprResult Name =
+ Actions.ActOnIdentifierExpr(parser.CurScope, Tok.getLocation(),
+ *Tok.getIdentifierInfo(), false);
+
+ if (Name.isInvalid()) {
+ if (!Ex.empty())
+ Action::MultiExprArg Release(Actions, &Ex[0], Ex.size());
+ return;
+ }
+
+ Ex.push_back(Name.release());
+ LexID = false;
+ continue;
+ }
+
+ // Illegal token! Release the parsed expressions (if any) and emit
+ // a warning.
+ if (!Ex.empty())
+ Action::MultiExprArg Release(Actions, &Ex[0], Ex.size());
+
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_var);
+ return;
+ }
+
+    // We are expecting a ')' or a ','.
+ if (Tok.is(tok::comma)) {
+ LexID = true;
+ continue;
+ }
+
+ if (Tok.is(tok::r_paren)) {
+ RParenLoc = Tok.getLocation();
+ break;
+ }
+
+ // Illegal token! Release the parsed expressions (if any) and emit
+ // a warning.
+ if (!Ex.empty())
+ Action::MultiExprArg Release(Actions, &Ex[0], Ex.size());
+
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_punc);
+ return;
+ }
+
+ // Verify that we have a location for the right parenthesis.
+ assert(RParenLoc.isValid() && "Valid '#pragma unused' must have ')'");
+ assert(!Ex.empty() && "Valid '#pragma unused' must have arguments");
+
+ // Perform the action to handle the pragma.
+ Actions.ActOnPragmaUnused(&Ex[0], Ex.size(), UnusedLoc, LParenLoc, RParenLoc);
+}
diff --git a/lib/Parse/ParsePragma.h b/lib/Parse/ParsePragma.h
new file mode 100644
index 0000000..31b2a5f
--- /dev/null
+++ b/lib/Parse/ParsePragma.h
@@ -0,0 +1,44 @@
+//===---- ParsePragma.h - Language specific pragmas -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines #pragma handlers for language specific pragmas.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_PARSEPRAGMA_H
+#define LLVM_CLANG_PARSE_PARSEPRAGMA_H
+
+#include "clang/Lex/Pragma.h"
+
+namespace clang {
+ class Action;
+ class Parser;
+
+class PragmaPackHandler : public PragmaHandler {
+ Action &Actions;
+public:
+ PragmaPackHandler(const IdentifierInfo *N, Action &A) : PragmaHandler(N),
+ Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+};
+
+class PragmaUnusedHandler : public PragmaHandler {
+ Action &Actions;
+ Parser &parser;
+public:
+ PragmaUnusedHandler(const IdentifierInfo *N, Action &A, Parser& p)
+ : PragmaHandler(N), Actions(A), parser(p) {}
+
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
new file mode 100644
index 0000000..758b662
--- /dev/null
+++ b/lib/Parse/ParseStmt.cpp
@@ -0,0 +1,1435 @@
+//===--- ParseStmt.cpp - Statement and Block Parser -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Statement and Block portions of the Parser
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "ExtensionRAIIObject.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.8: Statements and Blocks.
+//===----------------------------------------------------------------------===//
+
+/// ParseStatementOrDeclaration - Read 'statement' or 'declaration'.
+/// StatementOrDeclaration:
+/// statement
+/// declaration
+///
+/// statement:
+/// labeled-statement
+/// compound-statement
+/// expression-statement
+/// selection-statement
+/// iteration-statement
+/// jump-statement
+/// [C++] declaration-statement
+/// [C++] try-block
+/// [OBC] objc-throw-statement
+/// [OBC] objc-try-catch-statement
+/// [OBC] objc-synchronized-statement
+/// [GNU] asm-statement
+/// [OMP] openmp-construct [TODO]
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// 'case' constant-expression ':' statement
+/// 'default' ':' statement
+///
+/// selection-statement:
+/// if-statement
+/// switch-statement
+///
+/// iteration-statement:
+/// while-statement
+/// do-statement
+/// for-statement
+///
+/// expression-statement:
+/// expression[opt] ';'
+///
+/// jump-statement:
+/// 'goto' identifier ';'
+/// 'continue' ';'
+/// 'break' ';'
+/// 'return' expression[opt] ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// [OBC] objc-throw-statement:
+/// [OBC] '@' 'throw' expression ';'
+/// [OBC] '@' 'throw' ';'
+///
+Parser::OwningStmtResult
+Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
+ const char *SemiError = 0;
+ OwningStmtResult Res(Actions);
+
+ // Cases in this switch statement should fall through if the parser expects
+  // the statement to end in a semicolon (in which case SemiError should be set),
+ // or they directly 'return;' if not.
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation AtLoc;
+ switch (Kind) {
+ case tok::at: // May be a @try or @throw statement
+ {
+ AtLoc = ConsumeToken(); // consume @
+ return ParseObjCAtStatement(AtLoc);
+ }
+
+ case tok::identifier:
+ if (NextToken().is(tok::colon)) { // C99 6.8.1: labeled-statement
+ // identifier ':' statement
+ return ParseLabeledStatement();
+ }
+    // FALL THROUGH.
+
+ default: {
+ if ((getLang().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext, DeclEnd);
+ return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
+ }
+
+ if (Tok.is(tok::r_brace)) {
+ Diag(Tok, diag::err_expected_statement);
+ return StmtError();
+ }
+
+ // expression[opt] ';'
+ OwningExprResult Expr(ParseExpression());
+ if (Expr.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon. Not
+ // doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+ // Otherwise, eat the semicolon.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Actions.FullExpr(Expr));
+ }
+
+ case tok::kw_case: // C99 6.8.1: labeled-statement
+ return ParseCaseStatement();
+ case tok::kw_default: // C99 6.8.1: labeled-statement
+ return ParseDefaultStatement();
+
+ case tok::l_brace: // C99 6.8.2: compound-statement
+ return ParseCompoundStatement();
+ case tok::semi: // C99 6.8.3p3: expression[opt] ';'
+ return Actions.ActOnNullStmt(ConsumeToken());
+
+ case tok::kw_if: // C99 6.8.4.1: if-statement
+ return ParseIfStatement();
+ case tok::kw_switch: // C99 6.8.4.2: switch-statement
+ return ParseSwitchStatement();
+
+ case tok::kw_while: // C99 6.8.5.1: while-statement
+ return ParseWhileStatement();
+ case tok::kw_do: // C99 6.8.5.2: do-statement
+ Res = ParseDoStatement();
+ SemiError = "do/while loop";
+ break;
+ case tok::kw_for: // C99 6.8.5.3: for-statement
+ return ParseForStatement();
+
+ case tok::kw_goto: // C99 6.8.6.1: goto-statement
+ Res = ParseGotoStatement();
+ SemiError = "goto statement";
+ break;
+ case tok::kw_continue: // C99 6.8.6.2: continue-statement
+ Res = ParseContinueStatement();
+ SemiError = "continue statement";
+ break;
+ case tok::kw_break: // C99 6.8.6.3: break-statement
+ Res = ParseBreakStatement();
+ SemiError = "break statement";
+ break;
+ case tok::kw_return: // C99 6.8.6.4: return-statement
+ Res = ParseReturnStatement();
+ SemiError = "return statement";
+ break;
+
+ case tok::kw_asm: {
+ bool msAsm = false;
+ Res = ParseAsmStatement(msAsm);
+ if (msAsm) return move(Res);
+ SemiError = "asm statement";
+ break;
+ }
+
+ case tok::kw_try: // C++ 15: try-block
+ return ParseCXXTryBlock();
+ }
+
+ // If we reached this code, the statement must end in a semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (!Res.isInvalid()) {
+ Diag(Tok, diag::err_expected_semi_after) << SemiError;
+ // Skip until we see a } or ;, but don't eat it.
+ SkipUntil(tok::r_brace, true, true);
+ }
+ return move(Res);
+}
+
+/// ParseLabeledStatement - We have an identifier and a ':' after it.
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// [GNU] identifier ':' attributes[opt] statement
+///
+Parser::OwningStmtResult Parser::ParseLabeledStatement() {
+ assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
+ "Not an identifier!");
+
+ Token IdentTok = Tok; // Save the whole token.
+ ConsumeToken(); // eat the identifier.
+
+ assert(Tok.is(tok::colon) && "Not a label!");
+
+ // identifier ':' statement
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Read label attributes, if present.
+ Action::AttrTy *AttrList = 0;
+ if (Tok.is(tok::kw___attribute))
+ // TODO: save these somewhere.
+ AttrList = ParseAttributes();
+
+ OwningStmtResult SubStmt(ParseStatement());
+
+ // Broken substmt shouldn't prevent the label from being added to the AST.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+
+ return Actions.ActOnLabelStmt(IdentTok.getLocation(),
+ IdentTok.getIdentifierInfo(),
+ ColonLoc, move(SubStmt));
+}
+
+/// ParseCaseStatement
+/// labeled-statement:
+/// 'case' constant-expression ':' statement
+/// [GNU] 'case' constant-expression '...' constant-expression ':' statement
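+///
+/// Illustrative examples (editor's sketch, not from the original source):
+///   case 4: break;          // plain case label
+///   case 1 ... 5: break;    // GNU case-range extension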
+///
+Parser::OwningStmtResult Parser::ParseCaseStatement() {
+ assert(Tok.is(tok::kw_case) && "Not a case stmt!");
+
+ // It is very very common for code to contain many case statements recursively
+ // nested, as in (but usually without indentation):
+ // case 1:
+ // case 2:
+ // case 3:
+ // case 4:
+ // case 5: etc.
+ //
+  // Parsing this naively works, but is inefficient and can cause us to run
+  // out of stack space in our recursive descent parser. As a special case,
+  // flatten this recursion into an iterative loop. This is complex and gross,
+  // but all the grossness is constrained to ParseCaseStatement (and some
+  // weirdness in the actions), so this is just local grossness :).
+
+ // TopLevelCase - This is the highest level we have parsed. 'case 1' in the
+ // example above.
+ OwningStmtResult TopLevelCase(Actions, true);
+
+ // DeepestParsedCaseStmt - This is the deepest statement we have parsed, which
+ // gets updated each time a new case is parsed, and whose body is unset so
+ // far. When parsing 'case 4', this is the 'case 3' node.
+ StmtTy *DeepestParsedCaseStmt = 0;
+
+ // While we have case statements, eat and stack them.
+ do {
+ SourceLocation CaseLoc = ConsumeToken(); // eat the 'case'.
+
+ OwningExprResult LHS(ParseConstantExpression());
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+
+ // GNU case range extension.
+ SourceLocation DotDotDotLoc;
+ OwningExprResult RHS(Actions);
+ if (Tok.is(tok::ellipsis)) {
+ Diag(Tok, diag::ext_gnu_case_range);
+ DotDotDotLoc = ConsumeToken();
+
+ RHS = ParseConstantExpression();
+ if (RHS.isInvalid()) {
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+ }
+
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon_after) << "'case'";
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+
+ SourceLocation ColonLoc = ConsumeToken();
+
+ OwningStmtResult Case =
+ Actions.ActOnCaseStmt(CaseLoc, move(LHS), DotDotDotLoc,
+ move(RHS), ColonLoc);
+
+ // If we had a sema error parsing this case, then just ignore it and
+ // continue parsing the sub-stmt.
+ if (Case.isInvalid()) {
+ if (TopLevelCase.isInvalid()) // No parsed case stmts.
+ return ParseStatement();
+ // Otherwise, just don't add it as a nested case.
+ } else {
+ // If this is the first case statement we parsed, it becomes TopLevelCase.
+ // Otherwise we link it into the current chain.
+ StmtTy *NextDeepest = Case.get();
+ if (TopLevelCase.isInvalid())
+ TopLevelCase = move(Case);
+ else
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, move(Case));
+ DeepestParsedCaseStmt = NextDeepest;
+ }
+
+ // Handle all case statements.
+ } while (Tok.is(tok::kw_case));
+
+ assert(!TopLevelCase.isInvalid() && "Should have parsed at least one case!");
+
+ // If we found a non-case statement, start by parsing it.
+ OwningStmtResult SubStmt(Actions);
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Nicely diagnose the common error "switch (X) { case 4: }", which is
+ // not valid.
+ // FIXME: add insertion hint.
+ Diag(Tok, diag::err_label_end_of_compound_statement);
+ SubStmt = true;
+ }
+
+ // Broken sub-stmt shouldn't prevent forming the case statement properly.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(SourceLocation());
+
+ // Install the body into the most deeply-nested case.
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, move(SubStmt));
+
+ // Return the top level parsed statement tree.
+ return move(TopLevelCase);
+}
+
+/// ParseDefaultStatement
+/// labeled-statement:
+/// 'default' ':' statement
+/// Note that this does not parse the 'statement' at the end.
+///
+Parser::OwningStmtResult Parser::ParseDefaultStatement() {
+ assert(Tok.is(tok::kw_default) && "Not a default stmt!");
+ SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
+
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon_after) << "'default'";
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Diagnose the common error "switch (X) {... default: }", which is not valid.
+ if (Tok.is(tok::r_brace)) {
+ Diag(Tok, diag::err_label_end_of_compound_statement);
+ return StmtError();
+ }
+
+ OwningStmtResult SubStmt(ParseStatement());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
+ move(SubStmt), CurScope);
+}
+
+
+/// ParseCompoundStatement - Parse a "{}" block.
+///
+/// compound-statement: [C99 6.8.2]
+/// { block-item-list[opt] }
+/// [GNU] { label-declarations block-item-list } [TODO]
+///
+/// block-item-list:
+/// block-item
+/// block-item-list block-item
+///
+/// block-item:
+/// declaration
+/// [GNU] '__extension__' declaration
+/// statement
+/// [OMP] openmp-directive [TODO]
+///
+/// [GNU] label-declarations:
+/// [GNU] label-declaration
+/// [GNU] label-declarations label-declaration
+///
+/// [GNU] label-declaration:
+/// [GNU] '__label__' identifier-list ';'
+///
+/// [OMP] openmp-directive: [TODO]
+/// [OMP] barrier-directive
+/// [OMP] flush-directive
+///
+Parser::OwningStmtResult Parser::ParseCompoundStatement(bool isStmtExpr) {
+  assert(Tok.is(tok::l_brace) && "Not a compound stmt!");
+
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope CompoundScope(this, Scope::DeclScope);
+
+ // Parse the statements in the body.
+ return ParseCompoundStatementBody(isStmtExpr);
+}
+
+
+/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
+/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
+/// consume the '}' at the end of the block. It does not manipulate the scope
+/// stack.
+Parser::OwningStmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
+ Tok.getLocation(),
+ "in compound statement ('{}')");
+
+ SourceLocation LBraceLoc = ConsumeBrace(); // eat the '{'.
+
+ // TODO: "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
+ // only allowed at the start of a compound stmt regardless of the language.
+
+ typedef StmtVector StmtsTy;
+ StmtsTy Stmts(Actions);
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ OwningStmtResult R(Actions);
+ if (Tok.isNot(tok::kw___extension__)) {
+ R = ParseStatementOrDeclaration(false);
+ } else {
+ // __extension__ can start declarations and it can also be a unary
+ // operator for expressions. Consume multiple __extension__ markers here
+ // until we can determine which is which.
+ // FIXME: This loses extension expressions in the AST!
+ SourceLocation ExtLoc = ConsumeToken();
+ while (Tok.is(tok::kw___extension__))
+ ConsumeToken();
+
+ // If this is the start of a declaration, parse it as such.
+ if (isDeclarationStatement()) {
+ // __extension__ silences extension warnings in the subdeclaration.
+ // FIXME: Save the __extension__ on the decl as a node somehow?
+ ExtensionRAIIObject O(Diags);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Res = ParseDeclaration(Declarator::BlockContext,DeclEnd);
+ R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
+ } else {
+ // Otherwise this was a unary __extension__ marker.
+ OwningExprResult Res(ParseExpressionWithLeadingExtension(ExtLoc));
+
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ continue;
+ }
+
+ // Eat the semicolon at the end of stmt and convert the expr into a
+ // statement.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_expr);
+ R = Actions.ActOnExprStmt(Actions.FullExpr(Res));
+ }
+ }
+
+ if (R.isUsable())
+ Stmts.push_back(R.release());
+ }
+
+ // We broke out of the while loop because we found a '}' or EOF.
+ if (Tok.isNot(tok::r_brace)) {
+ Diag(Tok, diag::err_expected_rbrace);
+ return StmtError();
+ }
+
+ SourceLocation RBraceLoc = ConsumeBrace();
+ return Actions.ActOnCompoundStmt(LBraceLoc, RBraceLoc, move_arg(Stmts),
+ isStmtExpr);
+}
+
+/// ParseParenExprOrCondition:
+/// [C ] '(' expression ')'
+/// [C++] '(' condition ')' [not allowed if OnlyAllowCondition=true]
+///
+/// This function parses and performs error recovery on the specified condition
+/// or expression (depending on whether we're in C++ or C mode). This function
+/// goes out of its way to recover well. It returns true if there was a parser
+/// error (the right paren couldn't be found), which indicates that the caller
+/// should try to recover harder. It returns false if the condition is
+/// successfully parsed. Note that a successful parse can still have semantic
+/// errors in the condition.
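+///
+/// Illustrative examples (editor's sketch, not from the original source):
+///   if (x + y > 0)           // C: plain parenthesized expression
+///   if (int *p = getPtr())   // C++: condition that declares and tests 'p'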
+bool Parser::ParseParenExprOrCondition(OwningExprResult &CondExp,
+ bool OnlyAllowCondition) {
+ SourceLocation LParenLoc = ConsumeParen();
+
+ if (getLang().CPlusPlus)
+ CondExp = ParseCXXCondition();
+ else
+ CondExp = ParseExpression();
+
+ // If the parser was confused by the condition and we don't have a ')', try to
+ // recover by skipping ahead to a semi and bailing out. If condexp is
+ // semantically invalid but we have well formed code, keep going.
+ if (CondExp.isInvalid() && Tok.isNot(tok::r_paren)) {
+ SkipUntil(tok::semi);
+ // Skipping may have stopped if it found the containing ')'. If so, we can
+ // continue parsing the if statement.
+ if (Tok.isNot(tok::r_paren))
+ return true;
+ }
+
+ // Otherwise the condition is valid or the rparen is present.
+ MatchRHSPunctuation(tok::r_paren, LParenLoc);
+ return false;
+}
+
+
+/// ParseIfStatement
+/// if-statement: [C99 6.8.4.1]
+/// 'if' '(' expression ')' statement
+/// 'if' '(' expression ')' statement 'else' statement
+/// [C++] 'if' '(' condition ')' statement
+/// [C++] 'if' '(' condition ')' statement 'else' statement
+///
+Parser::OwningStmtResult Parser::ParseIfStatement() {
+ assert(Tok.is(tok::kw_if) && "Not an if stmt!");
+ SourceLocation IfLoc = ConsumeToken(); // eat the 'if'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "if";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the if statement is a block. This is not
+ // the case for C90.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ ParseScope IfScope(this, Scope::DeclScope | Scope::ControlScope, C99orCXX);
+
+ // Parse the condition.
+ OwningExprResult CondExp(Actions);
+ if (ParseParenExprOrCondition(CondExp))
+ return StmtError();
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // For C++ we create a scope for the condition and a new scope for
+ // substatements because:
+ // -When the 'then' scope exits, we want the condition declaration to still be
+ // active for the 'else' scope too.
+ // -Sema will detect name clashes by considering declarations of a
+ // 'ControlScope' as part of its direct subscope.
+ // -If we wanted the condition and substatement to be in the same scope, we
+ // would have to notify ParseStatement not to create a new scope. It's
+ // simpler to let it create a new scope.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the 'then' stmt.
+ SourceLocation ThenStmtLoc = Tok.getLocation();
+ OwningStmtResult ThenStmt(ParseStatement());
+
+ // Pop the 'if' scope if needed.
+ InnerScope.Exit();
+
+ // If it has an else, parse it.
+ SourceLocation ElseLoc;
+ SourceLocation ElseStmtLoc;
+ OwningStmtResult ElseStmt(Actions);
+
+ if (Tok.is(tok::kw_else)) {
+ ElseLoc = ConsumeToken();
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do
+ // this if the body isn't a compound statement to avoid push/pop in common
+ // cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ bool WithinElse = CurScope->isWithinElse();
+ CurScope->setWithinElse(true);
+ ElseStmtLoc = Tok.getLocation();
+ ElseStmt = ParseStatement();
+ CurScope->setWithinElse(WithinElse);
+
+ // Pop the 'else' scope if needed.
+ InnerScope.Exit();
+ }
+
+ IfScope.Exit();
+
+ // If the condition was invalid, discard the if statement. We could recover
+ // better by replacing it with a valid expr, but don't do that yet.
+ if (CondExp.isInvalid())
+ return StmtError();
+
+ // If the then or else stmt is invalid and the other is valid (and present),
+  // turn the invalid one into a null stmt to avoid dropping the other
+ // part. If both are invalid, return error.
+ if ((ThenStmt.isInvalid() && ElseStmt.isInvalid()) ||
+ (ThenStmt.isInvalid() && ElseStmt.get() == 0) ||
+ (ThenStmt.get() == 0 && ElseStmt.isInvalid())) {
+ // Both invalid, or one is invalid and other is non-present: return error.
+ return StmtError();
+ }
+
+ // Now if either are invalid, replace with a ';'.
+ if (ThenStmt.isInvalid())
+ ThenStmt = Actions.ActOnNullStmt(ThenStmtLoc);
+ if (ElseStmt.isInvalid())
+ ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
+
+ return Actions.ActOnIfStmt(IfLoc, Actions.FullExpr(CondExp), move(ThenStmt),
+ ElseLoc, move(ElseStmt));
+}
+
+/// ParseSwitchStatement
+/// switch-statement:
+/// 'switch' '(' expression ')' statement
+/// [C++] 'switch' '(' condition ')' statement
+Parser::OwningStmtResult Parser::ParseSwitchStatement() {
+ assert(Tok.is(tok::kw_switch) && "Not a switch stmt!");
+ SourceLocation SwitchLoc = ConsumeToken(); // eat the 'switch'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "switch";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the switch statement is a block. This is
+ // not the case for C90. Start the switch scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags = Scope::BreakScope;
+ if (C99orCXX)
+ ScopeFlags |= Scope::DeclScope | Scope::ControlScope;
+ ParseScope SwitchScope(this, ScopeFlags);
+
+ // Parse the condition.
+ OwningExprResult Cond(Actions);
+ if (ParseParenExprOrCondition(Cond))
+ return StmtError();
+
+ OwningStmtResult Switch(Actions);
+ if (!Cond.isInvalid())
+ Switch = Actions.ActOnStartOfSwitchStmt(move(Cond));
+
+ // C99 6.8.4p3 - In C99, the body of the switch statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ OwningStmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ if (Body.isInvalid()) {
+ Body = Actions.ActOnNullStmt(Tok.getLocation());
+ // FIXME: Remove the case statement list from the Switch statement.
+ }
+
+ SwitchScope.Exit();
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnFinishSwitchStmt(SwitchLoc, move(Switch), move(Body));
+}
+
+/// ParseWhileStatement
+/// while-statement: [C99 6.8.5.1]
+/// 'while' '(' expression ')' statement
+/// [C++] 'while' '(' condition ')' statement
+Parser::OwningStmtResult Parser::ParseWhileStatement() {
+ assert(Tok.is(tok::kw_while) && "Not a while stmt!");
+ SourceLocation WhileLoc = Tok.getLocation();
+ ConsumeToken(); // eat the 'while'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "while";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLang().C99 || getLang().CPlusPlus;
+
+ // C99 6.8.5p5 - In C99, the while statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags;
+ if (C99orCXX)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope | Scope::ControlScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+ ParseScope WhileScope(this, ScopeFlags);
+
+ // Parse the condition.
+ OwningExprResult Cond(Actions);
+ if (ParseParenExprOrCondition(Cond))
+ return StmtError();
+
+  // C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ OwningStmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+ WhileScope.Exit();
+
+ if (Cond.isInvalid() || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnWhileStmt(WhileLoc, Actions.FullExpr(Cond), move(Body));
+}
+
+/// ParseDoStatement
+/// do-statement: [C99 6.8.5.2]
+/// 'do' statement 'while' '(' expression ')' ';'
+/// Note: this lets the caller parse the end ';'.
+Parser::OwningStmtResult Parser::ParseDoStatement() {
+ assert(Tok.is(tok::kw_do) && "Not a do stmt!");
+ SourceLocation DoLoc = ConsumeToken(); // eat the 'do'.
+
+ // C99 6.8.5p5 - In C99, the do statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ unsigned ScopeFlags;
+ if (getLang().C99)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope | Scope::DeclScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+
+ ParseScope DoScope(this, ScopeFlags);
+
+  // C99 6.8.5p5 - In C99, the body of the do statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ (getLang().C99 || getLang().CPlusPlus) &&
+ Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ OwningStmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ if (Tok.isNot(tok::kw_while)) {
+ if (!Body.isInvalid()) {
+ Diag(Tok, diag::err_expected_while);
+ Diag(DoLoc, diag::note_matching) << "do";
+ SkipUntil(tok::semi, false, true);
+ }
+ return StmtError();
+ }
+ SourceLocation WhileLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "do/while";
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+
+ // Parse the parenthesized condition.
+ OwningExprResult Cond(Actions);
+ ParseParenExprOrCondition(Cond, true);
+
+ DoScope.Exit();
+
+ if (Cond.isInvalid() || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnDoStmt(DoLoc, move(Body), WhileLoc, move(Cond));
+}
+
+/// ParseForStatement
+/// for-statement: [C99 6.8.5.3]
+/// 'for' '(' expr[opt] ';' expr[opt] ';' expr[opt] ')' statement
+/// 'for' '(' declaration expr[opt] ';' expr[opt] ')' statement
+/// [C++] 'for' '(' for-init-statement condition[opt] ';' expression[opt] ')'
+/// [C++] statement
+/// [OBJC2] 'for' '(' declaration 'in' expr ')' statement
+/// [OBJC2] 'for' '(' expr 'in' expr ')' statement
+///
+/// [C++] for-init-statement:
+/// [C++] expression-statement
+/// [C++] simple-declaration
+///
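+/// For illustration only (identifiers invented), two forms accepted here:
+///   for (unsigned i = 0; i != N; ++i) Body();   // C99/C++ declaration form
+///   for (id Elem in Collection) Use(Elem);      // ObjC2 fast enumeration
+///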
+Parser::OwningStmtResult Parser::ParseForStatement() {
+ assert(Tok.is(tok::kw_for) && "Not a for stmt!");
+ SourceLocation ForLoc = ConsumeToken(); // eat the 'for'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "for";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXXorObjC = getLang().C99 || getLang().CPlusPlus || getLang().ObjC1;
+
+ // C99 6.8.5p5 - In C99, the for statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ // C++ 6.5.3p1:
+ // Names declared in the for-init-statement are in the same declarative-region
+ // as those declared in the condition.
+ //
+ unsigned ScopeFlags;
+ if (C99orCXXorObjC)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope | Scope::ControlScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+
+ ParseScope ForScope(this, ScopeFlags);
+
+ SourceLocation LParenLoc = ConsumeParen();
+ OwningExprResult Value(Actions);
+
+ bool ForEach = false;
+ OwningStmtResult FirstPart(Actions);
+ OwningExprResult SecondPart(Actions), ThirdPart(Actions);
+
+ // Parse the first part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (;
+ // no first part, eat the ';'.
+ ConsumeToken();
+ } else if (isSimpleDeclaration()) { // for (int X = 4;
+ // Parse declaration, which eats the ';'.
+ if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
+ Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(Declarator::ForContext, DeclEnd,
+ false);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+
+ if (Tok.is(tok::semi)) { // for (int x = 4;
+ ConsumeToken();
+ } else if ((ForEach = isTokIdentifier_in())) {
+ // ObjC: for (id x in expr)
+ ConsumeToken(); // consume 'in'
+ SecondPart = ParseExpression();
+ } else {
+ Diag(Tok, diag::err_expected_semi_for);
+ SkipUntil(tok::semi);
+ }
+ } else {
+ Value = ParseExpression();
+
+ // Turn the expression into a stmt.
+ if (!Value.isInvalid())
+ FirstPart = Actions.ActOnExprStmt(Actions.FullExpr(Value));
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if ((ForEach = isTokIdentifier_in())) {
+ ConsumeToken(); // consume 'in'
+ SecondPart = ParseExpression();
+ } else {
+ if (!Value.isInvalid()) Diag(Tok, diag::err_expected_semi_for);
+ SkipUntil(tok::semi);
+ }
+ }
+ if (!ForEach) {
+ assert(!SecondPart.get() && "Shouldn't have a second expression yet.");
+ // Parse the second part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (...;;
+ // no second part.
+ } else {
+ SecondPart = getLang().CPlusPlus ? ParseCXXCondition() : ParseExpression();
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ if (!SecondPart.isInvalid()) Diag(Tok, diag::err_expected_semi_for);
+ SkipUntil(tok::semi);
+ }
+
+ // Parse the third part of the for specifier.
+ if (Tok.isNot(tok::r_paren)) // for (...;...;)
+ ThirdPart = ParseExpression();
+ }
+ // Match the ')'.
+ SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc);
+
+ // C99 6.8.5p5 - In C99, the body of the for statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for
+ // for-init-statement/condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXXorObjC && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ OwningStmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ // Leave the for-scope.
+ ForScope.Exit();
+
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (!ForEach)
+ return Actions.ActOnForStmt(ForLoc, LParenLoc, move(FirstPart),
+ move(SecondPart), move(ThirdPart),
+ RParenLoc, move(Body));
+
+ return Actions.ActOnObjCForCollectionStmt(ForLoc, LParenLoc,
+ move(FirstPart),
+ move(SecondPart),
+ RParenLoc, move(Body));
+}
+
+/// ParseGotoStatement
+/// jump-statement:
+/// 'goto' identifier ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
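+/// For illustration only (names invented), the GNU indirect form pairs with
+/// the labels-as-values extension:
+///   void *Target = &&SomeLabel;
+///   goto *Target;
+///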
+Parser::OwningStmtResult Parser::ParseGotoStatement() {
+ assert(Tok.is(tok::kw_goto) && "Not a goto stmt!");
+ SourceLocation GotoLoc = ConsumeToken(); // eat the 'goto'.
+
+ OwningStmtResult Res(Actions);
+ if (Tok.is(tok::identifier)) {
+ Res = Actions.ActOnGotoStmt(GotoLoc, Tok.getLocation(),
+ Tok.getIdentifierInfo());
+ ConsumeToken();
+ } else if (Tok.is(tok::star)) {
+ // GNU indirect goto extension.
+ Diag(Tok, diag::ext_gnu_indirect_goto);
+ SourceLocation StarLoc = ConsumeToken();
+ OwningExprResult R(ParseExpression());
+ if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+ Res = Actions.ActOnIndirectGotoStmt(GotoLoc, StarLoc, move(R));
+ } else {
+ Diag(Tok, diag::err_expected_ident);
+ return StmtError();
+ }
+
+ return move(Res);
+}
+
+/// ParseContinueStatement
+/// jump-statement:
+/// 'continue' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+Parser::OwningStmtResult Parser::ParseContinueStatement() {
+ SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'.
+ return Actions.ActOnContinueStmt(ContinueLoc, CurScope);
+}
+
+/// ParseBreakStatement
+/// jump-statement:
+/// 'break' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+Parser::OwningStmtResult Parser::ParseBreakStatement() {
+ SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'.
+ return Actions.ActOnBreakStmt(BreakLoc, CurScope);
+}
+
+/// ParseReturnStatement
+/// jump-statement:
+/// 'return' expression[opt] ';'
+Parser::OwningStmtResult Parser::ParseReturnStatement() {
+ assert(Tok.is(tok::kw_return) && "Not a return stmt!");
+ SourceLocation ReturnLoc = ConsumeToken(); // eat the 'return'.
+
+ OwningExprResult R(Actions);
+ if (Tok.isNot(tok::semi)) {
+ R = ParseExpression();
+ if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+ }
+ return Actions.ActOnReturnStmt(ReturnLoc, Actions.FullExpr(R));
+}
+
+/// FuzzyParseMicrosoftAsmStatement. When -fms-extensions is enabled, this
+/// routine is called to skip/ignore tokens that comprise the MS asm statement.
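+///
+/// For illustration only, the two Microsoft forms skipped over here are:
+///   __asm mov eax, ebx            // rest of the line is assembly
+///   __asm { mov eax, ebx }        // braced assembly block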
+Parser::OwningStmtResult Parser::FuzzyParseMicrosoftAsmStatement() {
+ if (Tok.is(tok::l_brace)) {
+ unsigned short savedBraceCount = BraceCount;
+ do {
+ ConsumeAnyToken();
+ } while (BraceCount > savedBraceCount && Tok.isNot(tok::eof));
+ } else {
+ // From the MS website: If used without braces, the __asm keyword means
+ // that the rest of the line is an assembly-language statement.
+ SourceManager &SrcMgr = PP.getSourceManager();
+ SourceLocation TokLoc = Tok.getLocation();
+ unsigned LineNo = SrcMgr.getInstantiationLineNumber(TokLoc);
+ do {
+ ConsumeAnyToken();
+ TokLoc = Tok.getLocation();
+ } while ((SrcMgr.getInstantiationLineNumber(TokLoc) == LineNo) &&
+ Tok.isNot(tok::r_brace) && Tok.isNot(tok::semi) &&
+ Tok.isNot(tok::eof));
+ }
+ return Actions.ActOnNullStmt(Tok.getLocation());
+}
+
+/// ParseAsmStatement - Parse a GNU extended asm statement.
+/// asm-statement:
+/// gnu-asm-statement
+/// ms-asm-statement
+///
+/// [GNU] gnu-asm-statement:
+/// 'asm' type-qualifier[opt] '(' asm-argument ')' ';'
+///
+/// [GNU] asm-argument:
+/// asm-string-literal
+/// asm-string-literal ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// ':' asm-clobbers
+///
+/// [GNU] asm-clobbers:
+/// asm-string-literal
+/// asm-clobbers ',' asm-string-literal
+///
+/// [MS] ms-asm-statement:
+/// '__asm' assembly-instruction ';'[opt]
+/// '__asm' '{' assembly-instruction-list '}' ';'[opt]
+///
+/// [MS] assembly-instruction-list:
+/// assembly-instruction ';'[opt]
+/// assembly-instruction-list ';' assembly-instruction ';'[opt]
+///
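+/// For illustration only (operand names invented), a full gnu-asm-statement:
+///   asm volatile ("addl %2, %0" : "=r" (Sum) : "0" (Sum), "r" (X) : "cc");
+///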
+Parser::OwningStmtResult Parser::ParseAsmStatement(bool &msAsm) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm stmt");
+ SourceLocation AsmLoc = ConsumeToken();
+
+ if (getLang().Microsoft && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
+ msAsm = true;
+ return FuzzyParseMicrosoftAsmStatement();
+ }
+ DeclSpec DS;
+ SourceLocation Loc = Tok.getLocation();
+ ParseTypeQualifierListOpt(DS);
+
+ // GNU asms accept, but warn about, type-qualifiers other than volatile.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "restrict";
+
+ // Remember if this was a volatile asm.
+ bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
+ bool isSimple = false;
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ SkipUntil(tok::r_paren);
+ return StmtError();
+ }
+ Loc = ConsumeParen();
+
+ OwningExprResult AsmString(ParseAsmStringLiteral());
+ if (AsmString.isInvalid())
+ return StmtError();
+
+ llvm::SmallVector<std::string, 4> Names;
+ ExprVector Constraints(Actions);
+ ExprVector Exprs(Actions);
+ ExprVector Clobbers(Actions);
+
+ unsigned NumInputs = 0, NumOutputs = 0;
+
+ SourceLocation RParenLoc;
+ if (Tok.is(tok::r_paren)) {
+ // We have a simple asm expression
+ isSimple = true;
+
+ RParenLoc = ConsumeParen();
+ } else {
+ // Parse Outputs, if present.
+ if (ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+
+ NumOutputs = Names.size();
+
+ // Parse Inputs, if present.
+ if (ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+
+ assert(Names.size() == Constraints.size() &&
+ Constraints.size() == Exprs.size()
+ && "Input operand size mismatch!");
+
+ NumInputs = Names.size() - NumOutputs;
+
+ // Parse the clobbers, if present.
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+
+ // Parse the asm-string list for clobbers.
+ while (1) {
+ OwningExprResult Clobber(ParseAsmStringLiteral());
+
+ if (Clobber.isInvalid())
+ break;
+
+ Clobbers.push_back(Clobber.release());
+
+ if (Tok.isNot(tok::comma)) break;
+ ConsumeToken();
+ }
+ }
+
+ RParenLoc = MatchRHSPunctuation(tok::r_paren, Loc);
+ }
+
+ return Actions.ActOnAsmStmt(AsmLoc, isSimple, isVolatile,
+ NumOutputs, NumInputs, Names.data(),
+ move_arg(Constraints), move_arg(Exprs),
+ move(AsmString), move_arg(Clobbers),
+ RParenLoc);
+}
+
+/// ParseAsmOperandsOpt - Parse the asm-operands production as used by
+/// asm-statement. We also parse a leading ':' token. If the leading colon is
+/// not present, we do not parse anything.
+///
+/// [GNU] asm-operands:
+/// asm-operand
+/// asm-operands ',' asm-operand
+///
+/// [GNU] asm-operand:
+/// asm-string-literal '(' expression ')'
+/// '[' identifier ']' asm-string-literal '(' expression ')'
+///
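+/// For illustration only (names invented), operands with symbolic names:
+///   asm ("mov %[Src], %[Dst]" : [Dst] "=r" (A) : [Src] "r" (B));
+///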
+bool Parser::ParseAsmOperandsOpt(llvm::SmallVectorImpl<std::string> &Names,
+ llvm::SmallVectorImpl<ExprTy*> &Constraints,
+ llvm::SmallVectorImpl<ExprTy*> &Exprs) {
+ // Only do anything if this operand is present.
+ if (Tok.isNot(tok::colon)) return false;
+ ConsumeToken();
+
+ // 'asm-operands' isn't present?
+ if (!isTokenStringLiteral() && Tok.isNot(tok::l_square))
+ return false;
+
+ while (1) {
+ // Read the [id] if present.
+ if (Tok.is(tok::l_square)) {
+ SourceLocation Loc = ConsumeBracket();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ ConsumeToken();
+
+ Names.push_back(std::string(II->getName(), II->getLength()));
+ MatchRHSPunctuation(tok::r_square, Loc);
+ } else
+ Names.push_back(std::string());
+
+ OwningExprResult Constraint(ParseAsmStringLiteral());
+ if (Constraint.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+ Constraints.push_back(Constraint.release());
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm operand";
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ // Read the parenthesized expression.
+ SourceLocation OpenLoc = ConsumeParen();
+ OwningExprResult Res(ParseExpression());
+ MatchRHSPunctuation(tok::r_paren, OpenLoc);
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+ Exprs.push_back(Res.release());
+ // Eat the comma and continue parsing if it exists.
+ if (Tok.isNot(tok::comma)) return false;
+ ConsumeToken();
+ }
+
+ return true;
+}
+
+Parser::DeclPtrTy Parser::ParseFunctionStatementBody(DeclPtrTy Decl) {
+ assert(Tok.is(tok::l_brace));
+ SourceLocation LBraceLoc = Tok.getLocation();
+
+ PrettyStackTraceActionsDecl CrashInfo(Decl, LBraceLoc, Actions,
+ PP.getSourceManager(),
+ "parsing function body");
+
+ // Do not enter a scope for the brace, as the arguments are in the same scope
+ // (the function body) as the body itself. Instead, just read the statement
+ // list and put it into a CompoundStmt for safe keeping.
+ OwningStmtResult FnBody(ParseCompoundStatementBody());
+
+ // If the function body could not be parsed, make a bogus compoundstmt.
+ if (FnBody.isInvalid())
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
+ MultiStmtArg(Actions), false);
+
+ return Actions.ActOnFinishFunctionBody(Decl, move(FnBody));
+}
+
+/// ParseFunctionTryBlock - Parse a C++ function-try-block.
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
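+/// For illustration only (names invented), a constructor written this way:
+///   Widget::Widget(int N) try : Size(N) {
+///     Init();
+///   } catch (...) {
+///     LogFailure();  // the exception is automatically rethrown afterwards
+///   }
+///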
+Parser::DeclPtrTy Parser::ParseFunctionTryBlock(DeclPtrTy Decl) {
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+ SourceLocation TryLoc = ConsumeToken();
+
+ PrettyStackTraceActionsDecl CrashInfo(Decl, TryLoc, Actions,
+ PP.getSourceManager(),
+ "parsing function try block");
+
+ // Constructor initializer list?
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(Decl);
+
+ SourceLocation LBraceLoc = Tok.getLocation();
+ OwningStmtResult FnBody(ParseCXXTryBlockCommon(TryLoc));
+ // If we failed to parse the try-catch, we just give the function an empty
+ // compound statement as the body.
+ if (FnBody.isInvalid())
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
+ MultiStmtArg(Actions), false);
+
+ return Actions.ActOnFinishFunctionBody(Decl, move(FnBody));
+}
+
+/// ParseCXXTryBlock - Parse a C++ try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+Parser::OwningStmtResult Parser::ParseCXXTryBlock() {
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+
+ SourceLocation TryLoc = ConsumeToken();
+ return ParseCXXTryBlockCommon(TryLoc);
+}
+
+/// ParseCXXTryBlockCommon - Parse the common part of try-block and
+/// function-try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
+/// handler-seq:
+/// handler handler-seq[opt]
+///
+Parser::OwningStmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected_lbrace));
+ OwningStmtResult TryBlock(ParseCompoundStatement());
+ if (TryBlock.isInvalid())
+ return move(TryBlock);
+
+ StmtVector Handlers(Actions);
+ if (Tok.isNot(tok::kw_catch))
+ return StmtError(Diag(Tok, diag::err_expected_catch));
+ while (Tok.is(tok::kw_catch)) {
+ OwningStmtResult Handler(ParseCXXCatchBlock());
+ if (!Handler.isInvalid())
+ Handlers.push_back(Handler.release());
+ }
+ // Don't bother creating the full statement if we don't have any usable
+ // handlers.
+ if (Handlers.empty())
+ return StmtError();
+
+ return Actions.ActOnCXXTryBlock(TryLoc, move(TryBlock), move_arg(Handlers));
+}
+
+/// ParseCXXCatchBlock - Parse a C++ catch block, called a "handler" in the standard.
+///
+/// handler:
+/// 'catch' '(' exception-declaration ')' compound-statement
+///
+/// exception-declaration:
+/// type-specifier-seq declarator
+/// type-specifier-seq abstract-declarator
+/// type-specifier-seq
+/// '...'
+///
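+/// For illustration only (names invented):
+///   catch (const std::exception &E) { Report(E); }
+///   catch (...) { ReportUnknown(); }
+///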
+Parser::OwningStmtResult Parser::ParseCXXCatchBlock() {
+ assert(Tok.is(tok::kw_catch) && "Expected 'catch'");
+
+ SourceLocation CatchLoc = ConsumeToken();
+
+ SourceLocation LParenLoc = Tok.getLocation();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen))
+ return StmtError();
+
+ // C++ 3.3.2p3:
+ // The name in a catch exception-declaration is local to the handler and
+ // shall not be redeclared in the outermost block of the handler.
+ ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope);
+
+ // exception-declaration is equivalent to '...' or a parameter-declaration
+ // without default arguments.
+ DeclPtrTy ExceptionDecl;
+ if (Tok.isNot(tok::ellipsis)) {
+ DeclSpec DS;
+ if (ParseCXXTypeSpecifierSeq(DS))
+ return StmtError();
+ Declarator ExDecl(DS, Declarator::CXXCatchContext);
+ ParseDeclarator(ExDecl);
+ ExceptionDecl = Actions.ActOnExceptionDeclarator(CurScope, ExDecl);
+ } else
+ ConsumeToken();
+
+ if (MatchRHSPunctuation(tok::r_paren, LParenLoc).isInvalid())
+ return StmtError();
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected_lbrace));
+
+ OwningStmtResult Block(ParseCompoundStatement());
+ if (Block.isInvalid())
+ return move(Block);
+
+ return Actions.ActOnCXXCatchBlock(CatchLoc, ExceptionDecl, move(Block));
+}
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
new file mode 100644
index 0000000..2a79b99
--- /dev/null
+++ b/lib/Parse/ParseTemplate.cpp
@@ -0,0 +1,812 @@
+//===--- ParseTemplate.cpp - Template Parsing -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing of C++ templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+using namespace clang;
+
+/// \brief Parse a template declaration, explicit instantiation, or
+/// explicit specialization.
+Parser::DeclPtrTy
+Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+ if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less))
+ return ParseExplicitInstantiation(ConsumeToken(), DeclEnd);
+
+ return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AS);
+}
+
+/// \brief Parse a template declaration or an explicit specialization.
+///
+/// Template declarations include one or more template parameter lists
+/// and either the function or class template declaration. Explicit
+/// specializations contain one or more 'template < >' prefixes
+/// followed by a (possibly templated) declaration. Since the
+/// syntactic form of both features is nearly identical, we parse all
+/// of the template headers together and let semantic analysis sort
+/// the declarations from the explicit specializations.
+///
+/// template-declaration: [C++ temp]
+/// 'export'[opt] 'template' '<' template-parameter-list '>' declaration
+///
+/// explicit-specialization: [C++ temp.expl.spec]
+/// 'template' '<' '>' declaration
+Parser::DeclPtrTy
+Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+ assert((Tok.is(tok::kw_export) || Tok.is(tok::kw_template)) &&
+ "Token does not start a template declaration.");
+
+ // Enter template-parameter scope.
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+
+ // Parse multiple levels of template headers within this template
+ // parameter scope, e.g.,
+ //
+ // template<typename T>
+ // template<typename U>
+ // class A<T>::B { ... };
+ //
+ // We parse multiple levels non-recursively so that we can build a
+ // single data structure containing all of the template parameter
+ // lists to easily differentiate between the case above and:
+ //
+ // template<typename T>
+ // class A {
+ // template<typename U> class B;
+ // };
+ //
+ // In the first case, the action for declaring A<T>::B receives
+ // both template parameter lists. In the second case, the action for
+ // defining A<T>::B receives just the inner template parameter list
+ // (and retrieves the outer template parameter list from its
+ // context).
+ bool isSpecialization = true;
+ TemplateParameterLists ParamLists;
+ do {
+ // Consume the 'export', if any.
+ SourceLocation ExportLoc;
+ if (Tok.is(tok::kw_export)) {
+ ExportLoc = ConsumeToken();
+ }
+
+ // Consume the 'template', which should be here.
+ SourceLocation TemplateLoc;
+ if (Tok.is(tok::kw_template)) {
+ TemplateLoc = ConsumeToken();
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_template);
+ return DeclPtrTy();
+ }
+
+ // Parse the '<' template-parameter-list '>'
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateParameterList TemplateParams;
+ ParseTemplateParameters(ParamLists.size(), TemplateParams, LAngleLoc,
+ RAngleLoc);
+
+ if (!TemplateParams.empty())
+ isSpecialization = false;
+
+ ParamLists.push_back(
+ Actions.ActOnTemplateParameterList(ParamLists.size(), ExportLoc,
+ TemplateLoc, LAngleLoc,
+ TemplateParams.data(),
+ TemplateParams.size(), RAngleLoc));
+ } while (Tok.is(tok::kw_export) || Tok.is(tok::kw_template));
+
+ // Parse the actual template declaration.
+ return ParseSingleDeclarationAfterTemplate(Context,
+ ParsedTemplateInfo(&ParamLists,
+ isSpecialization),
+ DeclEnd, AS);
+}
+
+/// \brief Parse a single declaration that declares a template,
+/// template specialization, or explicit instantiation of a template.
+///
+/// \param TemplateParams if non-NULL, the template parameter lists
+/// that preceded this declaration. In this case, the declaration is a
+/// template declaration, out-of-line definition of a template, or an
+/// explicit template specialization. When NULL, the declaration is an
+/// explicit template instantiation.
+///
+/// \param TemplateLoc when TemplateParams is NULL, the location of
+/// the 'template' keyword that indicates that we have an explicit
+/// template instantiation.
+///
+/// \param DeclEnd will receive the source location of the last token
+/// within this declaration.
+///
+/// \param AS the access specifier associated with this
+/// declaration. Will be AS_none for namespace-scope declarations.
+///
+/// \returns the new declaration.
+Parser::DeclPtrTy
+Parser::ParseSingleDeclarationAfterTemplate(
+ unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+ assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ "Template information required");
+
+ // Parse the declaration specifiers.
+ DeclSpec DS;
+ // FIXME: Pass TemplateLoc through for explicit template instantiations
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS);
+
+ if (Tok.is(tok::semi)) {
+ DeclEnd = ConsumeToken();
+ return Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
+ }
+
+ // Parse the declarator.
+ Declarator DeclaratorInfo(DS, (Declarator::TheContext)Context);
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return DeclPtrTy();
+ }
+
+ // If we have a declaration or declarator list, handle it.
+ if (isDeclarationAfterDeclarator()) {
+ // Parse this declaration.
+ DeclPtrTy ThisDecl = ParseDeclarationAfterDeclarator(DeclaratorInfo);
+
+ if (Tok.is(tok::comma)) {
+ Diag(Tok, diag::err_multiple_template_declarators)
+ << (int)TemplateInfo.Kind;
+ SkipUntil(tok::semi, true, false);
+ return ThisDecl;
+ }
+
+ // Eat the semi colon after the declaration.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_declation);
+ return ThisDecl;
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ isStartOfFunctionDefinition()) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ if (Tok.is(tok::l_brace)) {
+ // This recovery skips the entire function body. It would be nice
+ // to simply call ParseFunctionDefinition() below, however Sema
+ // assumes the declarator represents a function, not a typedef.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, true);
+ } else {
+ SkipUntil(tok::semi);
+ }
+ return DeclPtrTy();
+ }
+ return ParseFunctionDefinition(DeclaratorInfo);
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator())
+ Diag(Tok, diag::err_expected_fn_body);
+ else
+ Diag(Tok, diag::err_invalid_token_after_toplevel_declarator);
+ SkipUntil(tok::semi);
+ return DeclPtrTy();
+}
+
+/// ParseTemplateParameters - Parses a template-parameter-list enclosed in
+/// angle brackets. Depth is the depth of this template-parameter-list, which
+/// is the number of template headers directly enclosing this template header.
+/// TemplateParams is the current list of template parameters we're building.
+/// The template parameter we parse will be added to this list. LAngleLoc and
+/// RAngleLoc will receive the positions of the '<' and '>', respectively,
+/// that enclose this template parameter list.
+bool Parser::ParseTemplateParameters(unsigned Depth,
+ TemplateParameterList &TemplateParams,
+ SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc) {
+ // Get the template parameter list.
+ if(!Tok.is(tok::less)) {
+ Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
+ return false;
+ }
+ LAngleLoc = ConsumeToken();
+
+ // Try to parse the template parameter list.
+ if (Tok.is(tok::greater))
+ RAngleLoc = ConsumeToken();
+ else if(ParseTemplateParameterList(Depth, TemplateParams)) {
+ if(!Tok.is(tok::greater)) {
+ Diag(Tok.getLocation(), diag::err_expected_greater);
+ return false;
+ }
+ RAngleLoc = ConsumeToken();
+ }
+ return true;
+}
+
+/// ParseTemplateParameterList - Parse a template parameter list. If
+/// the parsing fails badly (i.e., closing bracket was left out), this
+/// will try to put the token stream in a reasonable position (closing
+/// a statement, etc.) and return false.
+///
+/// template-parameter-list: [C++ temp]
+/// template-parameter
+/// template-parameter-list ',' template-parameter
+bool
+Parser::ParseTemplateParameterList(unsigned Depth,
+ TemplateParameterList &TemplateParams) {
+ while(1) {
+ if (DeclPtrTy TmpParam
+ = ParseTemplateParameter(Depth, TemplateParams.size())) {
+ TemplateParams.push_back(TmpParam);
+ } else {
+ // If we failed to parse a template parameter, skip until we find
+ // a comma or closing brace.
+ SkipUntil(tok::comma, tok::greater, true, true);
+ }
+
+ // Did we find a comma or the end of the template parameter list?
+ if(Tok.is(tok::comma)) {
+ ConsumeToken();
+ } else if(Tok.is(tok::greater)) {
+ // Don't consume this... that's done by template parser.
+ break;
+ } else {
+ // Somebody probably forgot to close the template. Skip ahead and
+ // try to get out of the expression. This error is currently
+ // subsumed by whatever goes on in ParseTemplateParameter.
+ // TODO: This could match >>, and it would be nice to avoid those
+ // silly errors with template <vec<T>>.
+ // Diag(Tok.getLocation(), diag::err_expected_comma_greater);
+ SkipUntil(tok::greater, true, true);
+ return false;
+ }
+ }
+ return true;
+}
+
+/// ParseTemplateParameter - Parse a template-parameter (C++ [temp.param]).
+///
+/// template-parameter: [C++ temp.param]
+/// type-parameter
+/// parameter-declaration
+///
+/// type-parameter: (see below)
+/// 'class' identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt] = id-expression
+Parser::DeclPtrTy
+Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
+ if(Tok.is(tok::kw_class) ||
+ (Tok.is(tok::kw_typename) &&
+ // FIXME: Next token has not been annotated!
+ NextToken().isNot(tok::annot_typename))) {
+ return ParseTypeParameter(Depth, Position);
+ }
+
+ if(Tok.is(tok::kw_template))
+ return ParseTemplateTemplateParameter(Depth, Position);
+
+ // If it's none of the above, then it must be a parameter declaration.
+ // NOTE: This will pick up errors in the closure of the template parameter
+ // list (e.g., "template < ;"). Check here to implement >> style closures.
+ return ParseNonTypeTemplateParameter(Depth, Position);
+}
+
+/// ParseTypeParameter - Parse a template type parameter (C++ [temp.param]).
+/// Other kinds of template parameters are parsed in
+/// ParseTemplateTemplateParameter and ParseNonTypeTemplateParameter.
+///
+/// type-parameter: [C++ temp.param]
+/// 'class' identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
+Parser::DeclPtrTy Parser::ParseTypeParameter(unsigned Depth, unsigned Position){
+ assert((Tok.is(tok::kw_class) || Tok.is(tok::kw_typename)) &&
+ "A type-parameter starts with 'class' or 'typename'");
+
+ // Consume the 'class' or 'typename' keyword.
+ bool TypenameKeyword = Tok.is(tok::kw_typename);
+ SourceLocation KeyLoc = ConsumeToken();
+
+ // Grab the template parameter name (if given)
+ SourceLocation NameLoc;
+ IdentifierInfo* ParamName = 0;
+ if(Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if(Tok.is(tok::equal) || Tok.is(tok::comma) ||
+ Tok.is(tok::greater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+
+ DeclPtrTy TypeParam = Actions.ActOnTypeParameter(CurScope, TypenameKeyword,
+ KeyLoc, ParamName, NameLoc,
+ Depth, Position);
+
+ // Grab a default type id (if given).
+ if(Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+ SourceLocation DefaultLoc = Tok.getLocation();
+ TypeResult DefaultType = ParseTypeName();
+ if (!DefaultType.isInvalid())
+ Actions.ActOnTypeParameterDefault(TypeParam, EqualLoc, DefaultLoc,
+ DefaultType.get());
+ }
+
+ return TypeParam;
+}
+
+/// ParseTemplateTemplateParameter - Handle the parsing of template
+/// template parameters.
+///
+/// type-parameter: [C++ temp.param]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt] = id-expression
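+///
+/// For illustration only (names invented), a declaration using such a
+/// parameter:
+///   template<template<typename> class Container> class Stack {
+///     Container<int> Items;
+///   };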
+Parser::DeclPtrTy
+Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
+ assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
+
+ // Handle the template <...> part.
+ SourceLocation TemplateLoc = ConsumeToken();
+ TemplateParameterList TemplateParams;
+ SourceLocation LAngleLoc, RAngleLoc;
+ {
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+ if(!ParseTemplateParameters(Depth + 1, TemplateParams, LAngleLoc,
+ RAngleLoc)) {
+ return DeclPtrTy();
+ }
+ }
+
+ // Generate a meaningful error if the user forgot to put class before the
+ // identifier, comma, or greater.
+ if(!Tok.is(tok::kw_class)) {
+ Diag(Tok.getLocation(), diag::err_expected_class_before)
+ << PP.getSpelling(Tok);
+ return DeclPtrTy();
+ }
+ SourceLocation ClassLoc = ConsumeToken();
+
+ // Get the identifier, if given.
+ SourceLocation NameLoc;
+ IdentifierInfo* ParamName = 0;
+ if(Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if(Tok.is(tok::equal) || Tok.is(tok::comma) || Tok.is(tok::greater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ return DeclPtrTy();
+ }
+
+ TemplateParamsTy *ParamList =
+ Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
+ TemplateLoc, LAngleLoc,
+ &TemplateParams[0],
+ TemplateParams.size(),
+ RAngleLoc);
+
+ Parser::DeclPtrTy Param
+ = Actions.ActOnTemplateTemplateParameter(CurScope, TemplateLoc,
+ ParamList, ParamName,
+ NameLoc, Depth, Position);
+
+ // Get the default value, if given.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+ OwningExprResult DefaultExpr = ParseCXXIdExpression();
+ if (DefaultExpr.isInvalid())
+ return Param;
+ else if (Param)
+ Actions.ActOnTemplateTemplateParameterDefault(Param, EqualLoc,
+ move(DefaultExpr));
+ }
+
+ return Param;
+}
+
+/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
+/// template parameters (e.g., in "template<int Size> class array;").
+///
+/// template-parameter:
+/// ...
+/// parameter-declaration
+///
+/// NOTE: It would be ideal to simply call out to ParseParameterDeclaration(),
+/// but that didn't work out too well. Instead, this tries to recreate the basic
+/// parsing of parameter declarations, but tries to constrain it for template
+/// parameters.
+/// FIXME: We need to make a ParseParameterDeclaration that works for
+/// non-type template parameters and normal function parameters.
+Parser::DeclPtrTy
+Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
+ SourceLocation StartLoc = Tok.getLocation();
+
+ // Parse the declaration-specifiers (i.e., the type).
+ // FIXME: The type should probably be restricted in some way... Not all
+ // declarators (parts of declarators?) are accepted for parameters.
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse this as a typename.
+ Declarator ParamDecl(DS, Declarator::TemplateParamContext);
+ ParseDeclarator(ParamDecl);
+ if (DS.getTypeSpecType() == DeclSpec::TST_unspecified && !DS.getTypeRep()) {
+ // This probably shouldn't happen - and it's more of a Sema thing, but
+ // basically we didn't parse the type name because we couldn't associate
+ // it with an AST node. We should just skip to the comma or greater.
+ // TODO: This is currently a placeholder for some kind of Sema Error.
+ Diag(Tok.getLocation(), diag::err_parse_error);
+ SkipUntil(tok::comma, tok::greater, true, true);
+ return DeclPtrTy();
+ }
+
+ // Create the parameter.
+ DeclPtrTy Param = Actions.ActOnNonTypeTemplateParameter(CurScope, ParamDecl,
+ Depth, Position);
+
+ // If there is a default value, parse it.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+
+ // C++ [temp.param]p15:
+ // When parsing a default template-argument for a non-type
+ // template-parameter, the first non-nested > is taken as the
+ // end of the template-parameter-list rather than a greater-than
+ // operator.
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+
+ OwningExprResult DefaultArg = ParseAssignmentExpression();
+ if (DefaultArg.isInvalid())
+ SkipUntil(tok::comma, tok::greater, true, true);
+ else if (Param)
+ Actions.ActOnNonTypeTemplateParameterDefault(Param, EqualLoc,
+ move(DefaultArg));
+ }
+
+ return Param;
+}
+
+/// \brief Parse the pieces of a template-id that follow the template name,
+/// which has already been parsed.
+///
+/// This routine takes care of parsing the enclosed template argument
+/// list ('<' template-argument-list[opt] '>') and placing the
+/// results into a form that can be transferred to semantic analysis.
+///
+/// \param Template the template declaration produced by isTemplateName
+///
+/// \param TemplateNameLoc the source location of the template name
+///
+/// \param SS if non-NULL, the nested-name-specifier preceding the
+/// template name.
+///
+/// \param ConsumeLastToken if true, then we will consume the last
+/// token that forms the template-id. Otherwise, we will leave the
+/// last token in the stream (e.g., so that it can be replaced with an
+/// annotation token).
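+///
+/// For illustration only: when "vector<vector<int>>" is parsed outside C++0x,
+/// the trailing '>>' is diagnosed below and treated as two '>' tokens so that
+/// the enclosing construct still sees its closing '>'.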
+bool
+Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ const CXXScopeSpec *SS,
+ bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ TemplateArgIsTypeList &TemplateArgIsType,
+ TemplateArgLocationList &TemplateArgLocations,
+ SourceLocation &RAngleLoc) {
+ assert(Tok.is(tok::less) && "Must have already parsed the template-name");
+
+ // Consume the '<'.
+ LAngleLoc = ConsumeToken();
+
+ // Parse the optional template-argument-list.
+ bool Invalid = false;
+ {
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+ if (Tok.isNot(tok::greater))
+ Invalid = ParseTemplateArgumentList(TemplateArgs, TemplateArgIsType,
+ TemplateArgLocations);
+
+ if (Invalid) {
+ // Try to find the closing '>'.
+ SkipUntil(tok::greater, true, !ConsumeLastToken);
+
+ return true;
+ }
+ }
+
+ if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater))
+ return true;
+
+ // Determine the location of the '>' or '>>'. Only consume this
+ // token if the caller asked us to.
+ RAngleLoc = Tok.getLocation();
+
+ if (Tok.is(tok::greatergreater)) {
+ if (!getLang().CPlusPlus0x) {
+ const char *ReplaceStr = "> >";
+ if (NextToken().is(tok::greater) || NextToken().is(tok::greatergreater))
+ ReplaceStr = "> > ";
+
+ Diag(Tok.getLocation(), diag::err_two_right_angle_brackets_need_space)
+ << CodeModificationHint::CreateReplacement(
+ SourceRange(Tok.getLocation()), ReplaceStr);
+ }
+
+ Tok.setKind(tok::greater);
+ if (!ConsumeLastToken) {
+ // Since we're not supposed to consume the '>>' token, we need
+ // to insert a second '>' token after the first.
+ PP.EnterToken(Tok);
+ }
+ } else if (ConsumeLastToken)
+ ConsumeToken();
+
+ return false;
+}
+
+/// \brief Replace the tokens that form a simple-template-id with an
+/// annotation token containing the complete template-id.
+///
+/// The first token in the stream must be the name of a template that
+/// is followed by a '<'. This routine will parse the complete
+/// simple-template-id and replace the tokens with a single annotation
+/// token with one of two different kinds: if the template-id names a
+/// type (and \p AllowTypeAnnotation is true), the annotation token is
+/// a type annotation that includes the optional nested-name-specifier
+/// (\p SS). Otherwise, the annotation token is a template-id
+/// annotation that does not include the optional
+/// nested-name-specifier.
+///
+/// \param Template the declaration of the template named by the first
+/// token (an identifier), as returned from \c Action::isTemplateName().
+///
+/// \param TNK the kind of template that \p Template
+/// refers to, as returned from \c Action::isTemplateName().
+///
+/// \param SS if non-NULL, the nested-name-specifier that precedes
+/// this template name.
+///
+/// \param TemplateKWLoc if valid, specifies that this template-id
+/// annotation was preceded by the 'template' keyword and gives the
+/// location of that keyword. If invalid (the default), then this
+/// template-id was not preceded by a 'template' keyword.
+///
+/// \param AllowTypeAnnotation if true (the default), then a
+/// simple-template-id that refers to a class template, template
+/// template parameter, or other template that produces a type will be
+/// replaced with a type annotation token. Otherwise, the
+/// simple-template-id is always replaced with a template-id
+/// annotation token.
+void Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
+ const CXXScopeSpec *SS,
+ SourceLocation TemplateKWLoc,
+ bool AllowTypeAnnotation) {
+ assert(getLang().CPlusPlus && "Can only annotate template-ids in C++");
+ assert(Template && Tok.is(tok::identifier) && NextToken().is(tok::less) &&
+ "Parser isn't at the beginning of a template-id");
+
+ // Consume the template-name.
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation TemplateNameLoc = ConsumeToken();
+
+ // Parse the enclosed template argument list.
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateArgList TemplateArgs;
+ TemplateArgIsTypeList TemplateArgIsType;
+ TemplateArgLocationList TemplateArgLocations;
+ bool Invalid = ParseTemplateIdAfterTemplateName(Template, TemplateNameLoc,
+ SS, false, LAngleLoc,
+ TemplateArgs,
+ TemplateArgIsType,
+ TemplateArgLocations,
+ RAngleLoc);
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
+ TemplateArgIsType.data(),
+ TemplateArgs.size());
+
+ if (Invalid) // FIXME: How to recover from a broken template-id?
+ return;
+
+ // Build the annotation token.
+ if (TNK == TNK_Type_template && AllowTypeAnnotation) {
+ Action::TypeResult Type
+ = Actions.ActOnTemplateIdType(Template, TemplateNameLoc,
+ LAngleLoc, TemplateArgsPtr,
+ &TemplateArgLocations[0],
+ RAngleLoc);
+ if (Type.isInvalid()) // FIXME: better recovery?
+ return;
+
+ Tok.setKind(tok::annot_typename);
+ Tok.setAnnotationValue(Type.get());
+ if (SS && SS->isNotEmpty())
+ Tok.setLocation(SS->getBeginLoc());
+ else if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+ } else {
+ // Build a template-id annotation token that can be processed
+ // later.
+ Tok.setKind(tok::annot_template_id);
+ TemplateIdAnnotation *TemplateId
+ = TemplateIdAnnotation::Allocate(TemplateArgs.size());
+ TemplateId->TemplateNameLoc = TemplateNameLoc;
+ TemplateId->Name = Name;
+ TemplateId->Template = Template.getAs<void*>();
+ TemplateId->Kind = TNK;
+ TemplateId->LAngleLoc = LAngleLoc;
+ TemplateId->RAngleLoc = RAngleLoc;
+ void **Args = TemplateId->getTemplateArgs();
+ bool *ArgIsType = TemplateId->getTemplateArgIsType();
+ SourceLocation *ArgLocs = TemplateId->getTemplateArgLocations();
+ for (unsigned Arg = 0, ArgEnd = TemplateArgs.size(); Arg != ArgEnd; ++Arg) {
+ Args[Arg] = TemplateArgs[Arg];
+ ArgIsType[Arg] = TemplateArgIsType[Arg];
+ ArgLocs[Arg] = TemplateArgLocations[Arg];
+ }
+ Tok.setAnnotationValue(TemplateId);
+ if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+
+ TemplateArgsPtr.release();
+ }
+
+ // Common fields for the annotation token
+ Tok.setAnnotationEndLoc(RAngleLoc);
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+}
+
+/// \brief Replaces a template-id annotation token with a type
+/// annotation token.
+///
+/// If there was a failure when forming the type from the template-id,
+/// a type annotation token will still be created, but will have a
+/// NULL type pointer to signify an error.
+void Parser::AnnotateTemplateIdTokenAsType(const CXXScopeSpec *SS) {
+ assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
+
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ assert((TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) &&
+ "Only works for type and dependent templates");
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->getTemplateArgIsType(),
+ TemplateId->NumArgs);
+
+ Action::TypeResult Type
+ = Actions.ActOnTemplateIdType(TemplateTy::make(TemplateId->Template),
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->getTemplateArgLocations(),
+ TemplateId->RAngleLoc);
+ // Create the new "type" annotation token.
+ Tok.setKind(tok::annot_typename);
+ Tok.setAnnotationValue(Type.isInvalid()? 0 : Type.get());
+ if (SS && SS->isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(SS->getBeginLoc());
+
+ // We might be backtracking, in which case we need to replace the
+ // template-id annotation token with the type annotation within the
+ // set of cached tokens. That way, we won't try to form the same
+ // class template specialization again.
+ PP.ReplaceLastTokenWithAnnotation(Tok);
+ TemplateId->Destroy();
+}
+
+/// ParseTemplateArgument - Parse a C++ template argument (C++ [temp.names]).
+///
+/// template-argument: [C++ 14.2]
+/// assignment-expression
+/// type-id
+/// id-expression
+void *Parser::ParseTemplateArgument(bool &ArgIsType) {
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and an
+ // expression is resolved to a type-id, regardless of the form of
+ // the corresponding template-parameter.
+ //
+ // Therefore, we initially try to parse a type-id.
+ if (isCXXTypeId(TypeIdAsTemplateArgument)) {
+ ArgIsType = true;
+ TypeResult TypeArg = ParseTypeName();
+ if (TypeArg.isInvalid())
+ return 0;
+ return TypeArg.get();
+ }
+
+ OwningExprResult ExprArg = ParseAssignmentExpression();
+ if (ExprArg.isInvalid() || !ExprArg.get())
+ return 0;
+
+ ArgIsType = false;
+ return ExprArg.release();
+}
+
+/// ParseTemplateArgumentList - Parse a C++ template-argument-list
+/// (C++ [temp.names]). Returns true if there was an error.
+///
+/// template-argument-list: [C++ 14.2]
+/// template-argument
+/// template-argument-list ',' template-argument
+bool
+Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
+ TemplateArgIsTypeList &TemplateArgIsType,
+ TemplateArgLocationList &TemplateArgLocations) {
+ while (true) {
+ bool IsType = false;
+ SourceLocation Loc = Tok.getLocation();
+ void *Arg = ParseTemplateArgument(IsType);
+ if (Arg) {
+ TemplateArgs.push_back(Arg);
+ TemplateArgIsType.push_back(IsType);
+ TemplateArgLocations.push_back(Loc);
+ } else {
+ SkipUntil(tok::comma, tok::greater, true, true);
+ return true;
+ }
+
+ // If the next token is a comma, consume it and keep reading
+ // arguments.
+ if (Tok.isNot(tok::comma)) break;
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+ return Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater);
+}
+
+/// \brief Parse a C++ explicit template instantiation
+/// (C++ [temp.explicit]).
+///
+/// explicit-instantiation:
+/// 'template' declaration
+Parser::DeclPtrTy
+Parser::ParseExplicitInstantiation(SourceLocation TemplateLoc,
+ SourceLocation &DeclEnd) {
+ return ParseSingleDeclarationAfterTemplate(Declarator::FileContext,
+ ParsedTemplateInfo(TemplateLoc),
+ DeclEnd, AS_none);
+}
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
new file mode 100644
index 0000000..81696d6
--- /dev/null
+++ b/lib/Parse/ParseTentative.cpp
@@ -0,0 +1,920 @@
+//===--- ParseTentative.cpp - Ambiguity Resolution Parsing ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the tentative parsing portions of the Parser
+// interfaces, for ambiguity resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+using namespace clang;
+
+/// isCXXDeclarationStatement - C++-specialized function that disambiguates
+/// between a declaration and an expression statement, when parsing function
+/// bodies. Returns true for declaration, false for expression.
+///
+/// declaration-statement:
+/// block-declaration
+///
+/// block-declaration:
+/// simple-declaration
+/// asm-definition
+/// namespace-alias-definition
+/// using-declaration
+/// using-directive
+/// [C++0x] static_assert-declaration
+///
+/// asm-definition:
+/// 'asm' '(' string-literal ')' ';'
+///
+/// namespace-alias-definition:
+/// 'namespace' identifier = qualified-namespace-specifier ';'
+///
+/// using-declaration:
+/// 'using' typename[opt] '::'[opt] nested-name-specifier
+/// unqualified-id ';'
+/// 'using' '::' unqualified-id ;
+///
+/// using-directive:
+/// 'using' 'namespace' '::'[opt] nested-name-specifier[opt]
+/// namespace-name ';'
+///
+bool Parser::isCXXDeclarationStatement() {
+ switch (Tok.getKind()) {
+ // asm-definition
+ case tok::kw_asm:
+ // namespace-alias-definition
+ case tok::kw_namespace:
+ // using-declaration
+ // using-directive
+ case tok::kw_using:
+ return true;
+ case tok::kw_static_assert:
+ // static_assert-declaration
+ return true;
+ default:
+ // simple-declaration
+ return isCXXSimpleDeclaration();
+ }
+}
+
+/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
+/// between a simple-declaration and an expression-statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+/// Returns false if the statement is disambiguated as expression.
+///
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
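+/// For illustration only: given a type T, the statement "T(a);" could be read
+/// as a declaration of 'a' (with redundant parentheses) or as a function-style
+/// cast expression; per C++ 6.8 it is taken to be a declaration.
+///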
+bool Parser::isCXXSimpleDeclaration() {
+ // C++ 6.8p1:
+ // There is an ambiguity in the grammar involving expression-statements and
+ // declarations: An expression-statement with a function-style explicit type
+ // conversion (5.2.3) as its leftmost subexpression can be indistinguishable
+ // from a declaration where the first declarator starts with a '('. In those
+ // cases the statement is a declaration. [Note: To disambiguate, the whole
+ // statement might have to be examined to determine if it is an
+ // expression-statement or a declaration].
+
+ // C++ 6.8p3:
+ // The disambiguation is purely syntactic; that is, the meaning of the names
+ // occurring in such a statement, beyond whether they are type-names or not,
+ // is not generally used in or changed by the disambiguation. Class
+ // templates are instantiated as necessary to determine if a qualified name
+ // is a type-name. Disambiguation precedes parsing, and a statement
+ // disambiguated as a declaration may be an ill-formed declaration.
+
+ // We don't have to parse all of the decl-specifier-seq part. There's only
+ // an ambiguity if the first decl-specifier is
+ // simple-type-specifier/typename-specifier followed by a '(', which may
+ // indicate a function-style cast expression.
+ // isCXXDeclarationSpecifier will return TPResult::Ambiguous() only in such
+ // a case.
+
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ TPR = TryParseSimpleDeclaration();
+ SourceLocation TentativeParseLoc = Tok.getLocation();
+
+ PA.Revert();
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ return true;
+
+ // Declarations take precedence over expressions.
+ if (TPR == TPResult::Ambiguous())
+ TPR = TPResult::True();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
+
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
+Parser::TPResult Parser::TryParseSimpleDeclaration() {
+ // We know that we have a simple-type-specifier/typename-specifier followed
+ // by a '('.
+ assert(isCXXDeclarationSpecifier() == TPResult::Ambiguous());
+
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else
+ ConsumeToken();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ TPResult TPR = TryParseInitDeclaratorList();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ if (Tok.isNot(tok::semi))
+ return TPResult::False();
+
+ return TPResult::Ambiguous();
+}
+
+/// init-declarator-list:
+/// init-declarator
+/// init-declarator-list ',' init-declarator
+///
+/// init-declarator:
+/// declarator initializer[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] initializer[opt]
+///
+/// initializer:
+/// '=' initializer-clause
+/// '(' expression-list ')'
+///
+/// initializer-clause:
+/// assignment-expression
+/// '{' initializer-list ','[opt] '}'
+/// '{' '}'
+///
+Parser::TPResult Parser::TryParseInitDeclaratorList() {
+ // GCC only examines the first declarator for disambiguation, e.g.:
+ // int(x), ++x; // GCC regards it as an ill-formed declaration.
+ //
+ // Comeau and MSVC will regard the above statement as a correct expression.
+ // Clang examines all of the declarators and also regards the above statement
+ // as a correct expression.
+
+ while (1) {
+ // declarator
+ TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.is(tok::kw_asm) || Tok.is(tok::kw___attribute))
+ return TPResult::True();
+
+ // initializer[opt]
+ if (Tok.is(tok::l_paren)) {
+ // Parse through the parens.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+ } else if (Tok.is(tok::equal)) {
+ // MSVC won't examine the rest of the declarators if '=' is encountered;
+ // it will conclude that it is a declaration.
+ // Comeau and Clang will examine the rest of the declarators.
+ // Note that "int(x) = {0}, ++x;" will be interpreted as an ill-formed
+ // expression.
+ //
+ // Parse through the initializer-clause.
+ SkipUntil(tok::comma, true/*StopAtSemi*/, true/*DontConsume*/);
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // the comma.
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// isCXXConditionDeclaration - Disambiguates between a declaration and an
+/// expression for the condition of an if/switch/while/for statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
+///
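+/// For illustration only (names invented): "if (int *P = GetPtr())" uses the
+/// declaration form of a condition, while "if (P == GetPtr())" uses the
+/// expression form; this routine decides which grammar applies before the
+/// condition is actually parsed.
+///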
+bool Parser::isCXXConditionDeclaration() {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else
+ ConsumeToken();
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ TPR = TPResult::True();
+
+ if (TPR == TPResult::Ambiguous()) {
+ // '='
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.is(tok::equal) ||
+ Tok.is(tok::kw_asm) || Tok.is(tok::kw___attribute))
+ TPR = TPResult::True();
+ else
+ TPR = TPResult::False();
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
+
+/// \brief Determine whether the next set of tokens contains a type-id.
+///
+/// The context parameter states what context we're parsing right
+/// now, which affects how this routine copes with the token
+/// following the type-id. If the context is TypeIdInParens, we have
+/// already parsed the '(' and we will cease lookahead when we hit
+/// the corresponding ')'. If the context is
+/// TypeIdAsTemplateArgument, we've already parsed the '<' or ','
+/// before this template argument, and will cease lookahead when we
+/// hit a '>', '>>' (in C++0x), or ','. Returns true for a type-id
+/// and false for an expression. If during the disambiguation
+/// process a parsing error is encountered, the function returns
+/// true to let the declaration parsing code handle it.
+///
+/// type-id:
+/// type-specifier-seq abstract-declarator[opt]
+///
+bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
+
+ isAmbiguous = false;
+
+ // C++ 8.2p2:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a type-id can occur in different contexts. The ambiguity appears as a
+ // choice between a function-style cast expression and a declaration of a
+ // type. The resolution is that any construct that could possibly be a type-id
+ // in its syntactic context shall be considered a type-id.
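+ //
+ // For example, in "sizeof(int())" the tokens inside the parentheses form a
+ // type-id, while in "sizeof(int(a))" they form an expression (assuming 'a'
+ // does not name a type).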
+
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else
+ ConsumeToken();
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/, false/*mayHaveIdentifier*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ TPR = TPResult::True();
+
+ if (TPR == TPResult::Ambiguous()) {
+ // We are supposed to be inside parens, so if after the abstract declarator
+ // we encounter a ')' this is a type-id, otherwise it's an expression.
+ if (Context == TypeIdInParens && Tok.is(tok::r_paren)) {
+ TPR = TPResult::True();
+ isAmbiguous = true;
+
+ // We are supposed to be inside a template argument, so if after
+ // the abstract declarator we encounter a '>', '>>' (in C++0x), or
+ // ',', this is a type-id. Otherwise, it's an expression.
+ } else if (Context == TypeIdAsTemplateArgument &&
+ (Tok.is(tok::greater) || Tok.is(tok::comma) ||
+ (getLang().CPlusPlus0x && Tok.is(tok::greatergreater)))) {
+ TPR = TPResult::True();
+ isAmbiguous = true;
+
+ } else
+ TPR = TPResult::False();
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
+
+/// declarator:
+/// direct-declarator
+/// ptr-operator declarator
+///
+/// direct-declarator:
+/// declarator-id
+/// direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// direct-declarator '[' constant-expression[opt] ']'
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+///
+/// abstract-declarator:
+/// ptr-operator abstract-declarator[opt]
+/// direct-abstract-declarator
+///
+/// direct-abstract-declarator:
+/// direct-abstract-declarator[opt]
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+/// direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+/// '(' abstract-declarator ')'
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&' [TODO]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
+///
+/// cv-qualifier-seq:
+/// cv-qualifier cv-qualifier-seq[opt]
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+///
+/// declarator-id:
+/// id-expression
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id [TODO]
+///
+/// unqualified-id:
+/// identifier
+/// operator-function-id [TODO]
+/// conversion-function-id [TODO]
+/// '~' class-name [TODO]
+/// template-id [TODO]
+///
+Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
+ bool mayHaveIdentifier) {
+ // declarator:
+ // direct-declarator
+ // ptr-operator declarator
+
+ while (1) {
+ if (Tok.is(tok::coloncolon) || Tok.is(tok::identifier))
+ TryAnnotateCXXScopeToken();
+
+ if (Tok.is(tok::star) || Tok.is(tok::amp) || Tok.is(tok::caret) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
+ // ptr-operator
+ ConsumeToken();
+ while (Tok.is(tok::kw_const) ||
+ Tok.is(tok::kw_volatile) ||
+ Tok.is(tok::kw_restrict))
+ ConsumeToken();
+ } else {
+ break;
+ }
+ }
+
+ // direct-declarator:
+ // direct-abstract-declarator:
+
+ if (Tok.is(tok::identifier) && mayHaveIdentifier) {
+ // declarator-id
+ ConsumeToken();
+ } else if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (mayBeAbstract &&
+ (Tok.is(tok::r_paren) || // 'int()' is a function.
+ Tok.is(tok::ellipsis) || // 'int(...)' is a function.
+ isDeclarationSpecifier())) { // 'int(int)' is a function.
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ TPResult TPR = TryParseFunctionDeclarator();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ } else {
+ // '(' declarator ')'
+ // '(' attributes declarator ')'
+ // '(' abstract-declarator ')'
+ if (Tok.is(tok::kw___attribute))
+ return TPResult::True(); // attributes indicate declaration
+ TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ if (Tok.isNot(tok::r_paren))
+ return TPResult::False();
+ ConsumeParen();
+ }
+ } else if (!mayBeAbstract) {
+ return TPResult::False();
+ }
+
+ while (1) {
+ TPResult TPR(TPResult::Ambiguous());
+
+ if (Tok.is(tok::l_paren)) {
+ // Check whether we have a function declarator or a possible ctor-style
+ // initializer that follows the declarator. Note that ctor-style
+ // initializers are not possible in contexts where abstract declarators
+ // are allowed.
+ if (!mayBeAbstract && !isCXXFunctionDeclarator(false/*warnIfAmbiguous*/))
+ break;
+
+ // direct-declarator '(' parameter-declaration-clause ')'
+ // cv-qualifier-seq[opt] exception-specification[opt]
+ ConsumeParen();
+ TPR = TryParseFunctionDeclarator();
+ } else if (Tok.is(tok::l_square)) {
+ // direct-declarator '[' constant-expression[opt] ']'
+ // direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+ TPR = TryParseBracketDeclarator();
+ } else {
+ break;
+ }
+
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a declaration
+/// specifier, TPResult::False() if it is not, TPResult::Ambiguous() if it could
+/// be either a decl-specifier or a function-style cast, and TPResult::Error()
+/// if a parsing error was found and reported.
+///
+/// decl-specifier:
+/// storage-class-specifier
+/// type-specifier
+/// function-specifier
+/// 'friend'
+/// 'typedef'
+/// [GNU] attributes declaration-specifiers[opt]
+///
+/// storage-class-specifier:
+/// 'register'
+/// 'static'
+/// 'extern'
+/// 'mutable'
+/// 'auto'
+/// [GNU] '__thread'
+///
+/// function-specifier:
+/// 'inline'
+/// 'virtual'
+/// 'explicit'
+///
+/// typedef-name:
+/// identifier
+///
+/// type-specifier:
+/// simple-type-specifier
+/// class-specifier
+/// enum-specifier
+/// elaborated-type-specifier
+/// typename-specifier
+/// cv-qualifier
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template'
+/// simple-template-id [TODO]
+/// 'char'
+/// 'wchar_t'
+/// 'bool'
+/// 'short'
+/// 'int'
+/// 'long'
+/// 'signed'
+/// 'unsigned'
+/// 'float'
+/// 'double'
+/// 'void'
+/// [GNU] typeof-specifier
+/// [GNU] '_Complex'
+/// [C++0x] 'auto' [TODO]
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
+/// elaborated-type-specifier:
+/// class-key '::'[opt] nested-name-specifier[opt] identifier
+/// class-key '::'[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+/// 'enum' '::'[opt] nested-name-specifier[opt] identifier
+///
+/// enum-name:
+/// identifier
+///
+/// enum-specifier:
+/// 'enum' identifier[opt] '{' enumerator-list[opt] '}'
+/// 'enum' identifier[opt] '{' enumerator-list ',' '}'
+///
+/// class-specifier:
+/// class-head '{' member-specification[opt] '}'
+///
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+///
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+/// [GNU] restrict
+///
+Parser::TPResult Parser::isCXXDeclarationSpecifier() {
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isCXXDeclarationSpecifier();
+ // Otherwise, not a typename.
+ return TPResult::False();
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return TPResult::False();
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return isCXXDeclarationSpecifier();
+ // Otherwise, not a typename.
+ return TPResult::False();
+
+ // decl-specifier:
+ // storage-class-specifier
+ // type-specifier
+ // function-specifier
+ // 'friend'
+ // 'typedef'
+
+ case tok::kw_friend:
+ case tok::kw_typedef:
+ // storage-class-specifier
+ case tok::kw_register:
+ case tok::kw_static:
+ case tok::kw_extern:
+ case tok::kw_mutable:
+ case tok::kw_auto:
+ case tok::kw___thread:
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+
+ // type-specifier:
+ // simple-type-specifier
+ // class-specifier
+ // enum-specifier
+ // elaborated-type-specifier
+ // typename-specifier
+ // cv-qualifier
+
+ // class-specifier
+ // elaborated-type-specifier
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+ // cv-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+
+ // GNU
+ case tok::kw_restrict:
+ case tok::kw__Complex:
+ case tok::kw___attribute:
+ return TPResult::True();
+
+ // Microsoft
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ return PP.getLangOptions().Microsoft ? TPResult::True() : TPResult::False();
+
+ // The ambiguity resides in a simple-type-specifier/typename-specifier
+ // followed by a '('. The '(' could either be the start of:
+ //
+ // direct-declarator:
+ // '(' declarator ')'
+ //
+ // direct-abstract-declarator:
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ // '(' abstract-declarator ')'
+ //
+ // or part of a function-style cast expression:
+ //
+ // simple-type-specifier '(' expression-list[opt] ')'
+ //
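+ // For example, the statement "int(x);" could either declare a variable 'x'
+ // with redundant parentheses around the name, or cast a previously declared
+ // 'x' to int and discard the result.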
+
+ // simple-type-specifier:
+
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::annot_typename:
+ if (NextToken().is(tok::l_paren))
+ return TPResult::Ambiguous();
+
+ return TPResult::True();
+
+ // GNU typeof support.
+ case tok::kw_typeof: {
+ if (NextToken().isNot(tok::l_paren))
+ return TPResult::True();
+
+ TentativeParsingAction PA(*this);
+
+ TPResult TPR = TryParseTypeofSpecifier();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error())
+ return TPResult::Error();
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous();
+
+ return TPResult::True();
+ }
+
+ default:
+ return TPResult::False();
+ }
+}
+
+/// [GNU] typeof-specifier:
+/// 'typeof' '(' expression ')'
+/// 'typeof' '(' type-name ')'
+///
+Parser::TPResult Parser::TryParseTypeofSpecifier() {
+ assert(Tok.is(tok::kw_typeof) && "Expected 'typeof'!");
+ ConsumeToken();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+ // Parse through the parens after 'typeof'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+
+ return TPResult::Ambiguous();
+}
+
+Parser::TPResult Parser::TryParseDeclarationSpecifier() {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else
+ ConsumeToken();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('!");
+ return TPResult::Ambiguous();
+}
+
+/// isCXXFunctionDeclarator - Disambiguates between a function declarator and
+/// a constructor-style initializer, when parsing declaration statements.
+/// Returns true for a function declarator and false for a constructor-style
+/// initializer.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
+
+ // C++ 8.2p1:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a declaration mentioned in 6.8 can also occur in the context of a
+ // declaration. In that context, the choice is between a function declaration
+ // with a redundant set of parentheses around a parameter name and an object
+ // declaration with a function-style cast as the initializer. Just as for the
+ // ambiguities mentioned in 6.8, the resolution is to consider any construct
+ // that could possibly be a declaration a declaration.
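+ //
+ // For example, given "struct S { S(int); };", the statement "S w(int(a));"
+ // declares a function 'w' with a single int parameter, rather than a
+ // variable 'w' initialized with the function-style cast int(a).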
+
+ TentativeParsingAction PA(*this);
+
+ ConsumeParen();
+ TPResult TPR = TryParseParameterDeclarationClause();
+ if (TPR == TPResult::Ambiguous() && Tok.isNot(tok::r_paren))
+ TPR = TPResult::False();
+
+ SourceLocation TPLoc = Tok.getLocation();
+ PA.Revert();
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ return true;
+
+ if (TPR == TPResult::Ambiguous()) {
+ // Function declarator has precedence over constructor-style initializer.
+ // Emit a warning just in case the author intended a variable definition.
+ if (warnIfAmbiguous)
+ Diag(Tok, diag::warn_parens_disambiguated_as_function_decl)
+ << SourceRange(Tok.getLocation(), TPLoc);
+ return true;
+ }
+
+ return TPR == TPResult::True();
+}
+
+/// parameter-declaration-clause:
+/// parameter-declaration-list[opt] '...'[opt]
+/// parameter-declaration-list ',' '...'
+///
+/// parameter-declaration-list:
+/// parameter-declaration
+/// parameter-declaration-list ',' parameter-declaration
+///
+/// parameter-declaration:
+/// decl-specifier-seq declarator
+/// decl-specifier-seq declarator '=' assignment-expression
+/// decl-specifier-seq abstract-declarator[opt]
+/// decl-specifier-seq abstract-declarator[opt] '=' assignment-expression
+///
+Parser::TPResult Parser::TryParseParameterDeclarationClause() {
+
+ if (Tok.is(tok::r_paren))
+ return TPResult::True();
+
+ // parameter-declaration-list[opt] '...'[opt]
+ // parameter-declaration-list ',' '...'
+ //
+ // parameter-declaration-list:
+ // parameter-declaration
+ // parameter-declaration-list ',' parameter-declaration
+ //
+ while (1) {
+ // '...'[opt]
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ return TPResult::True(); // '...' is a sign of a function declarator.
+ }
+
+ // decl-specifier-seq
+ TPResult TPR = TryParseDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ // declarator
+ // abstract-declarator[opt]
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ if (Tok.is(tok::equal)) {
+ // '=' assignment-expression
+ // Parse through assignment-expression.
+ tok::TokenKind StopToks[3] = { tok::comma, tok::ellipsis, tok::r_paren };
+ if (!SkipUntil(StopToks, 3, true/*StopAtSemi*/, true/*DontConsume*/))
+ return TPResult::Error();
+ }
+
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ return TPResult::True(); // '...' is a sign of a function declarator.
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // the comma.
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// TryParseFunctionDeclarator - We parsed a '(' and we want to try to continue
+/// parsing as a function declarator.
+/// If TryParseFunctionDeclarator fully parsed the function declarator, it will
+/// return TPResult::Ambiguous(), otherwise it will return either False() or
+/// Error().
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+/// exception-specification:
+/// 'throw' '(' type-id-list[opt] ')'
+///
+Parser::TPResult Parser::TryParseFunctionDeclarator() {
+
+ // The '(' is already parsed.
+
+ TPResult TPR = TryParseParameterDeclarationClause();
+ if (TPR == TPResult::Ambiguous() && Tok.isNot(tok::r_paren))
+ TPR = TPResult::False();
+
+ if (TPR == TPResult::False() || TPR == TPResult::Error())
+ return TPR;
+
+ // Parse through the parens.
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+
+ // cv-qualifier-seq
+ while (Tok.is(tok::kw_const) ||
+ Tok.is(tok::kw_volatile) ||
+ Tok.is(tok::kw_restrict) )
+ ConsumeToken();
+
+ // exception-specification
+ if (Tok.is(tok::kw_throw)) {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return TPResult::Error();
+
+ // Parse through the parens after 'throw'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// '[' constant-expression[opt] ']'
+///
+Parser::TPResult Parser::TryParseBracketDeclarator() {
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square))
+ return TPResult::Error();
+
+ return TPResult::Ambiguous();
+}
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
new file mode 100644
index 0000000..1c2e8a6
--- /dev/null
+++ b/lib/Parse/Parser.cpp
@@ -0,0 +1,996 @@
+//===--- Parser.cpp - C Language Family Parser ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Scope.h"
+#include "llvm/Support/raw_ostream.h"
+#include "ExtensionRAIIObject.h"
+#include "ParsePragma.h"
+using namespace clang;
+
+Parser::Parser(Preprocessor &pp, Action &actions)
+ : CrashInfo(*this), PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
+ GreaterThanIsOperator(true) {
+ Tok.setKind(tok::eof);
+ CurScope = 0;
+ NumCachedScopes = 0;
+ ParenCount = BracketCount = BraceCount = 0;
+ ObjCImpDecl = DeclPtrTy();
+
+ // Add #pragma handlers. These are removed and destroyed in the
+ // destructor.
+ PackHandler.reset(new
+ PragmaPackHandler(&PP.getIdentifierTable().get("pack"), actions));
+ PP.AddPragmaHandler(0, PackHandler.get());
+
+ UnusedHandler.reset(new
+ PragmaUnusedHandler(&PP.getIdentifierTable().get("unused"), actions,
+ *this));
+ PP.AddPragmaHandler(0, UnusedHandler.get());
+}
+
+/// If a crash happens while the parser is active, print out a line indicating
+/// what the current token is.
+void PrettyStackTraceParserEntry::print(llvm::raw_ostream &OS) const {
+ const Token &Tok = P.getCurToken();
+ if (Tok.is(tok::eof)) {
+ OS << "<eof> parser at end of file\n";
+ return;
+ }
+
+ if (Tok.getLocation().isInvalid()) {
+ OS << "<unknown> parser at unknown location\n";
+ return;
+ }
+
+ const Preprocessor &PP = P.getPreprocessor();
+ Tok.getLocation().print(OS, PP.getSourceManager());
+ OS << ": current parser token '" << PP.getSpelling(Tok) << "'\n";
+}
+
+
+DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(FullSourceLoc(Loc, PP.getSourceManager()), DiagID);
+}
+
+DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
+ return Diag(Tok.getLocation(), DiagID);
+}
+
+/// \brief Emits a diagnostic suggesting parentheses surrounding a
+/// given range.
+///
+/// \param Loc The location where we'll emit the diagnostic.
+/// \param DK The kind of diagnostic to emit.
+/// \param ParenRange Source range enclosing code that should be parenthesized.
+void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(ParenRange.getEnd());
+ if (!ParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
+ // We can't display the parentheses, so just emit the
+ // warning/error and return.
+ Diag(Loc, DK);
+ return;
+ }
+
+ Diag(Loc, DK)
+ << CodeModificationHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << CodeModificationHint::CreateInsertion(EndLoc, ")");
+}
+
+/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
+/// this helper function matches and consumes the specified RHS token if
+/// present. If not present, it emits the specified diagnostic indicating
+/// that the parser failed to match the RHS of the token at LHSLoc. LHSName
+/// should be the name of the unmatched LHS token.
+SourceLocation Parser::MatchRHSPunctuation(tok::TokenKind RHSTok,
+ SourceLocation LHSLoc) {
+
+ if (Tok.is(RHSTok))
+ return ConsumeAnyToken();
+
+ SourceLocation R = Tok.getLocation();
+ const char *LHSName = "unknown";
+ diag::kind DID = diag::err_parse_error;
+ switch (RHSTok) {
+ default: break;
+ case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
+ case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
+ case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
+ case tok::greater: LHSName = "<"; DID = diag::err_expected_greater; break;
+ }
+ Diag(Tok, DID);
+ Diag(LHSLoc, diag::note_matching) << LHSName;
+ SkipUntil(RHSTok);
+ return R;
+}
+
+/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
+/// input. If so, it is consumed and false is returned.
+///
+/// If the input is malformed, this emits the specified diagnostic. Next, if
+/// SkipToTok is specified, it calls SkipUntil(SkipToTok). Finally, true is
+/// returned.
+bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
+ const char *Msg, tok::TokenKind SkipToTok) {
+ if (Tok.is(ExpectedTok)) {
+ ConsumeAnyToken();
+ return false;
+ }
+
+ const char *Spelling = 0;
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (EndLoc.isValid() &&
+ (Spelling = tok::getTokenSimpleSpelling(ExpectedTok))) {
+ // Show what code to insert to fix this problem.
+ Diag(EndLoc, DiagID)
+ << Msg
+ << CodeModificationHint::CreateInsertion(EndLoc, Spelling);
+ } else
+ Diag(Tok, DiagID) << Msg;
+
+ if (SkipToTok != tok::unknown)
+ SkipUntil(SkipToTok);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Error recovery.
+//===----------------------------------------------------------------------===//
+
+/// SkipUntil - Read tokens until we get to the specified token, then consume
+/// it (unless DontConsume is true). Because we cannot guarantee that the
+/// token will ever occur, this skips to the next token, or to some likely
+/// good stopping point. If StopAtSemi is true, skipping will stop at a ';'
+/// character.
+///
+/// If SkipUntil finds the specified token, it returns true, otherwise it
+/// returns false.
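+///
+/// For example, SkipUntil(tok::r_paren) skips ahead, balancing any nested
+/// parentheses, brackets, and braces, until it consumes a ')' at the current
+/// nesting level (or gives up at end of file or another stopping point).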
+bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks,
+ bool StopAtSemi, bool DontConsume) {
+ // We always want this function to skip at least one token if the first
+ // token isn't one of the target tokens and we're not at EOF.
+ bool isFirstTokenSkipped = true;
+ while (1) {
+ // If we found one of the tokens, stop and return true.
+ for (unsigned i = 0; i != NumToks; ++i) {
+ if (Tok.is(Toks[i])) {
+ if (DontConsume) {
+ // Noop, don't consume the token.
+ } else {
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+ }
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Ran out of tokens.
+ return false;
+
+ case tok::l_paren:
+ // Recursively skip properly-nested parens.
+ ConsumeParen();
+ SkipUntil(tok::r_paren, false);
+ break;
+ case tok::l_square:
+ // Recursively skip properly-nested square brackets.
+ ConsumeBracket();
+ SkipUntil(tok::r_square, false);
+ break;
+ case tok::l_brace:
+ // Recursively skip properly-nested braces.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, false);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+ // already be handled), this isn't balanced. If there is a LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBrace();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ ConsumeStringToken();
+ break;
+ case tok::semi:
+ if (StopAtSemi)
+ return false;
+ // FALL THROUGH.
+ default:
+ // Skip this token.
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenSkipped = false;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Scope manipulation
+//===----------------------------------------------------------------------===//
+
+/// EnterScope - Start a new scope.
+void Parser::EnterScope(unsigned ScopeFlags) {
+ if (NumCachedScopes) {
+ Scope *N = ScopeCache[--NumCachedScopes];
+ N->Init(CurScope, ScopeFlags);
+ CurScope = N;
+ } else {
+ CurScope = new Scope(CurScope, ScopeFlags);
+ }
+}
+
+/// ExitScope - Pop a scope off the scope stack.
+void Parser::ExitScope() {
+ assert(CurScope && "Scope imbalance!");
+
+ // Inform the actions module that this scope is going away if there are any
+ // decls in it.
+ if (!CurScope->decl_empty())
+ Actions.ActOnPopScope(Tok.getLocation(), CurScope);
+
+ Scope *OldScope = CurScope;
+ CurScope = OldScope->getParent();
+
+ if (NumCachedScopes == ScopeCacheSize)
+ delete OldScope;
+ else
+ ScopeCache[NumCachedScopes++] = OldScope;
+}
+
+
+
+
+//===----------------------------------------------------------------------===//
+// C99 6.9: External Definitions.
+//===----------------------------------------------------------------------===//
+
+Parser::~Parser() {
+ // If we still have scopes active, delete the scope tree.
+ delete CurScope;
+
+ // Free the scope cache.
+ for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
+ delete ScopeCache[i];
+
+ // Remove the pragma handlers we installed.
+ PP.RemovePragmaHandler(0, PackHandler.get());
+ PackHandler.reset();
+ PP.RemovePragmaHandler(0, UnusedHandler.get());
+ UnusedHandler.reset();
+}
+
+/// Initialize - Warm up the parser.
+///
+void Parser::Initialize() {
+ // Prime the lexer look-ahead.
+ ConsumeToken();
+
+ // Create the translation unit scope. Install it as the current scope.
+ assert(CurScope == 0 && "A scope is already active?");
+ EnterScope(Scope::DeclScope);
+ Actions.ActOnTranslationUnitScope(Tok.getLocation(), CurScope);
+
+ if (Tok.is(tok::eof) &&
+ !getLang().CPlusPlus) // Empty source file is an extension in C
+ Diag(Tok, diag::ext_empty_source_file);
+
+ // Initialization for recognizing Objective-C context-sensitive keywords.
+ // Referenced in Parser::ParseObjCTypeQualifierList.
+ if (getLang().ObjC1) {
+ ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
+ ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
+ ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
+ ObjCTypeQuals[objc_oneway] = &PP.getIdentifierTable().get("oneway");
+ ObjCTypeQuals[objc_bycopy] = &PP.getIdentifierTable().get("bycopy");
+ ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
+ }
+
+ Ident_super = &PP.getIdentifierTable().get("super");
+}
+
+/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
+/// action tells us to. This returns true if the EOF was encountered.
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+ Result = DeclGroupPtrTy();
+ if (Tok.is(tok::eof)) {
+ Actions.ActOnEndOfTranslationUnit();
+ return true;
+ }
+
+ Result = ParseExternalDeclaration();
+ return false;
+}
+
+/// ParseTranslationUnit:
+/// translation-unit: [C99 6.9]
+/// external-declaration
+/// translation-unit external-declaration
+void Parser::ParseTranslationUnit() {
+ Initialize();
+
+ DeclGroupPtrTy Res;
+ while (!ParseTopLevelDecl(Res))
+ /*parse them all*/;
+
+ ExitScope();
+ assert(CurScope == 0 && "Scope imbalance!");
+}
+
+/// ParseExternalDeclaration:
+///
+/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
+/// function-definition
+/// declaration
+/// [EXT] ';'
+/// [GNU] asm-definition
+/// [GNU] __extension__ external-declaration
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] @end
+/// [C++] linkage-specification
+/// [GNU] asm-definition:
+/// simple-asm-expr ';'
+///
+Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration() {
+ DeclPtrTy SingleDecl;
+ switch (Tok.getKind()) {
+ case tok::semi:
+ Diag(Tok, diag::ext_top_level_semi)
+ << CodeModificationHint::CreateRemoval(SourceRange(Tok.getLocation()));
+ ConsumeToken();
+ // TODO: Invoke action for top-level semicolon.
+ return DeclGroupPtrTy();
+ case tok::r_brace:
+ Diag(Tok, diag::err_expected_external_declaration);
+ ConsumeBrace();
+ return DeclGroupPtrTy();
+ case tok::eof:
+ Diag(Tok, diag::err_expected_external_declaration);
+ return DeclGroupPtrTy();
+ case tok::kw___extension__: {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseExternalDeclaration();
+ }
+ case tok::kw_asm: {
+ OwningExprResult Result(ParseSimpleAsm());
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ "top-level asm block");
+
+ if (Result.isInvalid())
+ return DeclGroupPtrTy();
+ SingleDecl = Actions.ActOnFileScopeAsmDecl(Tok.getLocation(), move(Result));
+ break;
+ }
+ case tok::at:
+ // '@' is only a legal token when Objective-C is enabled, so no separate
+ // ObjC language check is needed here.
+ /// FIXME: ParseObjCAtDirectives should return a DeclGroup for things like
+ /// @class foo, bar;
+ SingleDecl = ParseObjCAtDirectives();
+ break;
+ case tok::minus:
+ case tok::plus:
+ if (!getLang().ObjC1) {
+ Diag(Tok, diag::err_expected_external_declaration);
+ ConsumeToken();
+ return DeclGroupPtrTy();
+ }
+ SingleDecl = ParseObjCMethodDefinition();
+ break;
+ case tok::kw_using:
+ case tok::kw_namespace:
+ case tok::kw_typedef:
+ case tok::kw_template:
+ case tok::kw_export: // As in 'export template'
+ case tok::kw_static_assert:
+ // A function definition cannot start with any of these keywords.
+ {
+ SourceLocation DeclEnd;
+ return ParseDeclaration(Declarator::FileContext, DeclEnd);
+ }
+ default:
+ // We can't tell whether this is a function-definition or declaration yet.
+ return ParseDeclarationOrFunctionDefinition();
+ }
+
+ // This routine returns a DeclGroup, if the thing we parsed only contains a
+ // single decl, convert it now.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, continues a declaration or declaration list.
+bool Parser::isDeclarationAfterDeclarator() {
+ return Tok.is(tok::equal) || // int X()= -> not a function def
+ Tok.is(tok::comma) || // int X(), -> not a function def
+ Tok.is(tok::semi) || // int X(); -> not a function def
+ Tok.is(tok::kw_asm) || // int X() __asm__ -> not a function def
+ Tok.is(tok::kw___attribute) || // int X() __attr__ -> not a function def
+ (getLang().CPlusPlus &&
+ Tok.is(tok::l_paren)); // int X(0) -> not a function def [C++]
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, indicates the start of a function definition.
+bool Parser::isStartOfFunctionDefinition() {
+ return Tok.is(tok::l_brace) || // int X() {}
+ (!getLang().CPlusPlus &&
+ isDeclarationSpecifier()) || // int X(f) int f; {}
+ (getLang().CPlusPlus &&
+ (Tok.is(tok::colon) || // X() : Base() {} (used for ctors)
+ Tok.is(tok::kw_try))); // X() try { ... }
+}
+
+/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition or
+/// a declaration. We can't tell which we have until we read up to the
+/// compound-statement in function-definition. TemplateParams, if
+/// non-NULL, provides the template parameters when we're parsing a
+/// C++ template-declaration.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C99 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+///
+/// declaration: [C99 6.7]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [!C99] init-declarator-list ';' [TODO: warn in c99 mode]
+/// [OMP] threadprivate-directive [TODO]
+///
+Parser::DeclGroupPtrTy
+Parser::ParseDeclarationOrFunctionDefinition(AccessSpecifier AS) {
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS);
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, DS);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ // ObjC2 allows prefix attributes on class interfaces and protocols.
+ // FIXME: This still needs better diagnostics. We should only accept
+ // attributes here, no types, etc.
+ if (getLang().ObjC2 && Tok.is(tok::at)) {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
+ !Tok.isObjCAtKeyword(tok::objc_protocol)) {
+ Diag(Tok, diag::err_objc_unexpected_attr);
+ SkipUntil(tok::semi); // FIXME: better skip?
+ return DeclGroupPtrTy();
+ }
+ const char *PrevSpec = 0;
+ if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec))
+ Diag(AtLoc, diag::err_invalid_decl_spec_combination) << PrevSpec;
+
+ DeclPtrTy TheDecl;
+ if (Tok.isObjCAtKeyword(tok::objc_protocol))
+ TheDecl = ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
+ else
+ TheDecl = ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes());
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ // If the declspec consisted only of 'extern' and we have a string
+ // literal following it, this must be a C++ linkage specifier like
+ // 'extern "C"'.
+ if (Tok.is(tok::string_literal) && getLang().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
+ DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
+ DeclPtrTy TheDecl = ParseLinkage(Declarator::FileContext);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ // Parse the first declarator.
+ Declarator DeclaratorInfo(DS, Declarator::FileContext);
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return DeclGroupPtrTy();
+ }
+
+ // If we have a declaration or declarator list, handle it.
+ if (isDeclarationAfterDeclarator()) {
+ // Parse the init-declarator-list for a normal declaration.
+ DeclGroupPtrTy DG =
+ ParseInitDeclaratorListAfterFirstDeclarator(DeclaratorInfo);
+ // Eat the semi colon after the declaration.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_declation);
+ return DG;
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ isStartOfFunctionDefinition()) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ if (Tok.is(tok::l_brace)) {
+ // This recovery skips the entire function body. It would be nice
+ // to simply call ParseFunctionDefinition() below, however Sema
+ // assumes the declarator represents a function, not a typedef.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, true);
+ } else {
+ SkipUntil(tok::semi);
+ }
+ return DeclGroupPtrTy();
+ }
+ DeclPtrTy TheDecl = ParseFunctionDefinition(DeclaratorInfo);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator())
+ Diag(Tok, diag::err_expected_fn_body);
+ else
+ Diag(Tok, diag::err_invalid_token_after_toplevel_declarator);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+}
+
+/// ParseFunctionDefinition - We parsed and verified that the specified
+/// Declarator is well formed. If this is a K&R-style function, read the
+/// parameters declaration-list, then start the compound-statement.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C99 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator ctor-initializer[opt]
+/// function-body
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator function-try-block
+///
+Parser::DeclPtrTy Parser::ParseFunctionDefinition(Declarator &D) {
+ const DeclaratorChunk &FnTypeInfo = D.getTypeObject(0);
+ assert(FnTypeInfo.Kind == DeclaratorChunk::Function &&
+ "This isn't a function declarator!");
+ const DeclaratorChunk::FunctionTypeInfo &FTI = FnTypeInfo.Fun;
+
+ // If this is C90 and the declspecs were completely missing, fudge in an
+ // implicit int. We do this here because this is the only place where
+ // declaration-specifiers are completely optional in the grammar.
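+ //
+ // For example, the C90 definition "foo(a) int a; { return a; }" has no
+ // declaration specifiers, so 'foo' gets an implicit 'int' return type here.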
+ if (getLang().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ const char *PrevSpec;
+ D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
+ D.getIdentifierLoc(),
+ PrevSpec);
+ D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
+ }
+
+ // If this declaration was formed with a K&R-style identifier list for the
+ // arguments, parse declarations for all of the args next.
+ // int foo(a,b) int a; float b; {}
+ if (!FTI.hasPrototype && FTI.NumArgs != 0)
+ ParseKNRParamDeclarations(D);
+
+ // We should have either an opening brace or, in a C++ constructor,
+ // we may have a colon.
+ if (Tok.isNot(tok::l_brace) && Tok.isNot(tok::colon) &&
+ Tok.isNot(tok::kw_try)) {
+ Diag(Tok, diag::err_expected_fn_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, true, true);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return DeclPtrTy();
+ }
+
+ // Enter a scope for the function body.
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a function definition with the
+ // specified Declarator for the function.
+ DeclPtrTy Res = Actions.ActOnStartOfFunctionDef(CurScope, D);
+
+ if (Tok.is(tok::kw_try))
+ return ParseFunctionTryBlock(Res);
+
+ // If we have a colon, then we're probably parsing a C++
+ // ctor-initializer.
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(Res);
+
+ return ParseFunctionStatementBody(Res);
+}
+
+/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
+/// types for a function with a K&R-style identifier list for arguments.
+void Parser::ParseKNRParamDeclarations(Declarator &D) {
+ // We know that the top-level of this declarator is a function.
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope|Scope::DeclScope);
+
+ // Read all the argument declarations.
+ while (isDeclarationSpecifier()) {
+ SourceLocation DSStart = Tok.getLocation();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS;
+ ParseDeclarationSpecifiers(DS);
+
+ // C99 6.9.1p6: 'each declaration in the declaration list shall have at
+ // least one declarator'.
+ // NOTE: GCC just makes this an ext-warn. It's not clear what it does with
+ // the declarations though. It's trivial to ignore them, really hard to do
+ // anything else with them.
+ if (Tok.is(tok::semi)) {
+ Diag(DSStart, diag::err_declaration_does_not_declare_param);
+ ConsumeToken();
+ continue;
+ }
+
+ // C99 6.9.1p6: Declarations shall contain no storage-class specifiers other
+ // than register.
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_register) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+ if (DS.isThreadSpecified()) {
+ Diag(DS.getThreadSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Parse the first declarator attached to this declspec.
+ Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
+ ParseDeclarator(ParmDeclarator);
+
+ // Handle the full declarator list.
+ while (1) {
+ Action::AttrTy *AttrList;
+ // If attributes are present, parse them.
+ if (Tok.is(tok::kw___attribute))
+ // FIXME: attach attributes too.
+ AttrList = ParseAttributes();
+
+ // Ask the actions module to compute the type for this declarator.
+ Action::DeclPtrTy Param =
+ Actions.ActOnParamDeclarator(CurScope, ParmDeclarator);
+
+ if (Param &&
+ // A missing identifier has already been diagnosed.
+ ParmDeclarator.getIdentifier()) {
+
+ // Scan the argument list looking for the correct param to apply this
+ // type.
+ for (unsigned i = 0; ; ++i) {
+ // C99 6.9.1p6: those declarators shall declare only identifiers from
+ // the identifier list.
+ if (i == FTI.NumArgs) {
+ Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
+ << ParmDeclarator.getIdentifier();
+ break;
+ }
+
+ if (FTI.ArgInfo[i].Ident == ParmDeclarator.getIdentifier()) {
+ // Reject redefinitions of parameters.
+ if (FTI.ArgInfo[i].Param) {
+ Diag(ParmDeclarator.getIdentifierLoc(),
+ diag::err_param_redefinition)
+ << ParmDeclarator.getIdentifier();
+ } else {
+ FTI.ArgInfo[i].Param = Param;
+ }
+ break;
+ }
+ }
+ }
+
+ // If we don't have a comma, it is either the end of the list (a ';') or
+ // an error, bail out.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ // Consume the comma.
+ ConsumeToken();
+
+ // Parse the next declarator.
+ ParmDeclarator.clear();
+ ParseDeclarator(ParmDeclarator);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_parse_error);
+ // Skip to end of block or statement
+ SkipUntil(tok::semi, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ }
+
+ // The actions module must verify that all arguments were declared.
+ Actions.ActOnFinishKNRParamDeclarations(CurScope, D, Tok.getLocation());
+}
+
+
+/// ParseAsmStringLiteral - This is just a normal string-literal, but is not
+/// allowed to be a wide string, and is not subject to character translation.
+///
+/// [GNU] asm-string-literal:
+/// string-literal
+///
+Parser::OwningExprResult Parser::ParseAsmStringLiteral() {
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal);
+ return ExprError();
+ }
+
+ OwningExprResult Res(ParseStringLiteralExpression());
+ if (Res.isInvalid()) return move(Res);
+
+ // TODO: Diagnose: wide string literal in 'asm'
+
+ return move(Res);
+}
+
+/// ParseSimpleAsm
+///
+/// [GNU] simple-asm-expr:
+/// 'asm' '(' asm-string-literal ')'
+///
+Parser::OwningExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm!");
+ SourceLocation Loc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ return ExprError();
+ }
+
+ Loc = ConsumeParen();
+
+ OwningExprResult Result(ParseAsmStringLiteral());
+
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, true, true);
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
+ ConsumeAnyToken();
+ } else {
+ Loc = MatchRHSPunctuation(tok::r_paren, Loc);
+ if (EndLoc)
+ *EndLoc = Loc;
+ }
+
+ return move(Result);
+}
+
+/// TryAnnotateTypeOrScopeToken - If the current token position is on a
+/// typename (possibly qualified in C++) or a C++ scope specifier not followed
+/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
+/// with a single annotation token representing the typename or C++ scope
+/// respectively.
+/// This simplifies handling of C++ scope specifiers and allows efficient
+/// backtracking without the need to re-parse and resolve nested-names and
+/// typenames.
+/// It will mainly be called when we expect to treat identifiers as typenames
+/// (if they are typenames). For example, in C we do not expect identifiers
+/// inside expressions to be treated as typenames so it will not be called
+/// for expressions in C.
+/// The benefit for C/ObjC is that a typename will be annotated and
+/// Actions.getTypeName will not need to be called again (e.g. getTypeName
+/// will not be called twice: once to check whether we have a declaration
+/// specifier, and again to get the actual type inside
+/// ParseDeclarationSpecifiers).
+///
+/// This returns true if the token was annotated.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
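+///
+/// For example, when the parser sees "foo::bar x;" and foo::bar names a type,
+/// the three tokens "foo", "::", and "bar" are replaced by a single
+/// tok::annot_typename token before declaration parsing continues.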
+bool Parser::TryAnnotateTypeOrScopeToken() {
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
+ || Tok.is(tok::kw_typename)) &&
+ "Cannot be a type or scope token!");
+
+ if (Tok.is(tok::kw_typename)) {
+ // Parse a C++ typename-specifier, e.g., "typename T::type".
+ //
+ // typename-specifier:
+ // 'typename' '::' [opt] nested-name-specifier identifier
+ // 'typename' '::' [opt] nested-name-specifier template [opt]
+ // simple-template-id
+ SourceLocation TypenameLoc = ConsumeToken();
+ CXXScopeSpec SS;
+ bool HadNestedNameSpecifier = ParseOptionalCXXScopeSpecifier(SS);
+ if (!HadNestedNameSpecifier) {
+ Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
+ return false;
+ }
+
+ TypeResult Ty;
+ if (Tok.is(tok::identifier)) {
+ // FIXME: check whether the next token is '<', first!
+ Ty = Actions.ActOnTypenameType(TypenameLoc, SS, *Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ if (TemplateId->Kind == TNK_Function_template) {
+ Diag(Tok, diag::err_typename_refers_to_non_type_template)
+ << Tok.getAnnotationRange();
+ return false;
+ }
+
+ AnnotateTemplateIdTokenAsType(0);
+ assert(Tok.is(tok::annot_typename) &&
+ "AnnotateTemplateIdTokenAsType isn't working properly");
+ if (Tok.getAnnotationValue())
+ Ty = Actions.ActOnTypenameType(TypenameLoc, SS, SourceLocation(),
+ Tok.getAnnotationValue());
+ else
+ Ty = true;
+ } else {
+ Diag(Tok, diag::err_expected_type_name_after_typename)
+ << SS.getRange();
+ return false;
+ }
+
+ Tok.setKind(tok::annot_typename);
+ Tok.setAnnotationValue(Ty.isInvalid()? 0 : Ty.get());
+ Tok.setAnnotationEndLoc(Tok.getLocation());
+ Tok.setLocation(TypenameLoc);
+ PP.AnnotateCachedTokens(Tok);
+ return true;
+ }
+
+ CXXScopeSpec SS;
+ if (getLang().CPlusPlus)
+ ParseOptionalCXXScopeSpecifier(SS);
+
+ if (Tok.is(tok::identifier)) {
+ // Determine whether the identifier is a type name.
+ if (TypeTy *Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), CurScope, &SS)) {
+ // This is a typename. Replace the current token in-place with an
+ // annotation type token.
+ Tok.setKind(tok::annot_typename);
+ Tok.setAnnotationValue(Ty);
+ Tok.setAnnotationEndLoc(Tok.getLocation());
+ if (SS.isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(SS.getBeginLoc());
+
+ // In case the tokens were cached, have Preprocessor replace
+ // them with the annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return true;
+ }
+
+ if (!getLang().CPlusPlus) {
+ // If we're in C, we can't have :: tokens at all (the lexer won't return
+ // them). If the identifier is not a type, then it can't be scope either,
+ // just early exit.
+ return false;
+ }
+
+ // If this is a template-id, annotate with a template-id or type token.
+ if (NextToken().is(tok::less)) {
+ TemplateTy Template;
+ if (TemplateNameKind TNK
+ = Actions.isTemplateName(*Tok.getIdentifierInfo(),
+ CurScope, Template, &SS))
+ AnnotateTemplateIdToken(Template, TNK, &SS);
+ }
+
+ // The current token, which is either an identifier or a
+ // template-id, is not part of the annotation. Fall through to
+ // push that token back into the stream and complete the C++ scope
+ // specifier annotation.
+ }
+
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId
+ = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ if (TemplateId->Kind == TNK_Type_template) {
+ // A template-id that refers to a type was parsed into a
+ // template-id annotation in a context where we weren't allowed
+ // to produce a type annotation token. Update the template-id
+ // annotation token to a type annotation token now.
+ AnnotateTemplateIdTokenAsType(&SS);
+ return true;
+ }
+ }
+
+ if (SS.isEmpty())
+ return false;
+
+ // A C++ scope specifier that isn't followed by a typename.
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(SS.getScopeRep());
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return true;
+}
+
+/// TryAnnotateCXXScopeToken - Like TryAnnotateTypeOrScopeToken but only
+/// annotates C++ scope specifiers and template-ids. This returns
+/// true if the token was annotated.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
+bool Parser::TryAnnotateCXXScopeToken() {
+ assert(getLang().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
+ "Cannot be a type or scope token!");
+
+ CXXScopeSpec SS;
+ if (!ParseOptionalCXXScopeSpecifier(SS))
+ return Tok.is(tok::annot_template_id);
+
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(SS.getScopeRep());
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return true;
+}
diff --git a/lib/Rewrite/CMakeLists.txt b/lib/Rewrite/CMakeLists.txt
new file mode 100644
index 0000000..52670b8
--- /dev/null
+++ b/lib/Rewrite/CMakeLists.txt
@@ -0,0 +1,9 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangRewrite
+ DeltaTree.cpp
+ HTMLRewrite.cpp
+ Rewriter.cpp
+ RewriteRope.cpp
+ TokenRewriter.cpp
+ )
diff --git a/lib/Rewrite/DeltaTree.cpp b/lib/Rewrite/DeltaTree.cpp
new file mode 100644
index 0000000..5d51dda
--- /dev/null
+++ b/lib/Rewrite/DeltaTree.cpp
@@ -0,0 +1,485 @@
+//===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeltaTree and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/DeltaTree.h"
+#include "llvm/Support/Casting.h"
+#include <cstring>
+#include <cstdio>
+using namespace clang;
+using llvm::cast;
+using llvm::dyn_cast;
+
+namespace {
+ struct SourceDelta;
+ class DeltaTreeNode;
+ class DeltaTreeInteriorNode;
+}
+
+/// The DeltaTree class is a multiway search tree (B-Tree) structure with some
+/// fancy features. B-Trees are generally more memory and cache efficient
+/// than binary trees, because they store multiple keys/values in each node.
+///
+/// DeltaTree implements a key/value mapping from FileIndex to Delta, allowing
+/// fast lookup by FileIndex. However, an added (important) bonus is that it
+/// can also efficiently tell us the full accumulated delta for a specific
+/// file offset as well, without traversing the whole tree.
+///
+/// The nodes of the tree are made up of instances of two classes:
+/// DeltaTreeNode and DeltaTreeInteriorNode. The latter subclasses the
+/// former and adds children pointers. Each node knows the full delta of all
+/// entries (recursively) contained inside of it, which allows us to get the
+/// full delta implied by a whole subtree in constant time.
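+///
+/// For example, if a delta of +5 has been recorded at file offset 100 and a
+/// delta of -2 at file offset 200, the accumulated delta reported for an
+/// offset well past both entries is +3.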
+
+namespace {
+ /// SourceDelta - As code in the original input buffer is added and deleted,
+ /// SourceDelta records are used to keep track of how the input SourceLocation
+ /// object is mapped into the output buffer.
+ struct SourceDelta {
+ unsigned FileLoc;
+ int Delta;
+
+ static SourceDelta get(unsigned Loc, int D) {
+ SourceDelta Delta;
+ Delta.FileLoc = Loc;
+ Delta.Delta = D;
+ return Delta;
+ }
+ };
+} // end anonymous namespace
+
+
+namespace {
+ struct InsertResult {
+ DeltaTreeNode *LHS, *RHS;
+ SourceDelta Split;
+ };
+} // end anonymous namespace
+
+
+namespace {
+ /// DeltaTreeNode - The common part of all nodes.
+ ///
+ class DeltaTreeNode {
+ friend class DeltaTreeInteriorNode;
+
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+ /// WidthFactor-1 K/V pairs (except the root) and may have at most
+ /// 2*WidthFactor-1 K/V pairs.
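+ /// With WidthFactor = 8, that is between 7 and 15 K/V pairs per node.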
+ enum { WidthFactor = 8 };
+
+ /// Values - This tracks the SourceDelta's currently in this node.
+ ///
+ SourceDelta Values[2*WidthFactor-1];
+
+ /// NumValuesUsed - This tracks the number of values this node currently
+ /// holds.
+ unsigned char NumValuesUsed;
+
+ /// IsLeaf - This is true if this is a leaf of the btree. If false, this is
+ /// an interior node, and is actually an instance of DeltaTreeInteriorNode.
+ bool IsLeaf;
+
+ /// FullDelta - This is the full delta of all the values in this node and
+ /// all children nodes.
+ int FullDelta;
+ public:
+ DeltaTreeNode(bool isLeaf = true)
+ : NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {}
+
+ bool isLeaf() const { return IsLeaf; }
+ int getFullDelta() const { return FullDelta; }
+ bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; }
+
+ unsigned getNumValuesUsed() const { return NumValuesUsed; }
+ const SourceDelta &getValue(unsigned i) const {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+ SourceDelta &getValue(unsigned i) {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+
+ /// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+ /// this node. If insertion is easy, do it and return false. Otherwise,
+ /// split the node, populate InsertRes with info about the split, and return
+ /// true.
+ bool DoInsertion(unsigned FileIndex, int Delta, InsertResult *InsertRes);
+
+ void DoSplit(InsertResult &InsertRes);
+
+
+ /// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+ /// local walk over our contained deltas.
+ void RecomputeFullDeltaLocally();
+
+ void Destroy();
+
+ static inline bool classof(const DeltaTreeNode *) { return true; }
+ };
+} // end anonymous namespace
+
+namespace {
+ /// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers.
+ /// This class tracks them.
+ class DeltaTreeInteriorNode : public DeltaTreeNode {
+ DeltaTreeNode *Children[2*WidthFactor];
+ ~DeltaTreeInteriorNode() {
+ for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i)
+ Children[i]->Destroy();
+ }
+ friend class DeltaTreeNode;
+ public:
+ DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {}
+
+ DeltaTreeInteriorNode(DeltaTreeNode *FirstChild)
+ : DeltaTreeNode(false /*nonleaf*/) {
+ FullDelta = FirstChild->FullDelta;
+ Children[0] = FirstChild;
+ }
+
+ DeltaTreeInteriorNode(const InsertResult &IR)
+ : DeltaTreeNode(false /*nonleaf*/) {
+ Children[0] = IR.LHS;
+ Children[1] = IR.RHS;
+ Values[0] = IR.Split;
+ FullDelta = IR.LHS->getFullDelta()+IR.RHS->getFullDelta()+IR.Split.Delta;
+ NumValuesUsed = 1;
+ }
+
+ const DeltaTreeNode *getChild(unsigned i) const {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+ DeltaTreeNode *getChild(unsigned i) {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+
+ static inline bool classof(const DeltaTreeInteriorNode *) { return true; }
+ static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
+ };
+}
+
+
+/// Destroy - A 'virtual' destructor.
+void DeltaTreeNode::Destroy() {
+ if (isLeaf())
+ delete this;
+ else
+ delete cast<DeltaTreeInteriorNode>(this);
+}
+
+/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+/// local walk over our contained deltas.
+void DeltaTreeNode::RecomputeFullDeltaLocally() {
+ int NewFullDelta = 0;
+ for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i)
+ NewFullDelta += Values[i].Delta;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this))
+ for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i)
+ NewFullDelta += IN->getChild(i)->getFullDelta();
+ FullDelta = NewFullDelta;
+}
+
+/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+/// this node. If insertion is easy, do it and return false. Otherwise,
+/// split the node, populate InsertRes with info about the split, and return
+/// true.
+bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta,
+ InsertResult *InsertRes) {
+ // Maintain full delta for this node.
+ FullDelta += Delta;
+
+ // Find the insertion point, the first delta whose index is >= FileIndex.
+ unsigned i = 0, e = getNumValuesUsed();
+ while (i != e && FileIndex > getValue(i).FileLoc)
+ ++i;
+
+ // If we found a record for exactly this file index, just merge this
+ // value into the pre-existing record and finish early.
+ if (i != e && getValue(i).FileLoc == FileIndex) {
+ // NOTE: Delta could drop to zero here. This means that the delta entry is
+ // useless and could be removed. Supporting erases is more complex than it
+ // is worth, so we just leave the entry with Delta=0 in the tree.
+ Values[i].Delta += Delta;
+ return false;
+ }
+
+ // Otherwise, we found an insertion point, and we know that the value at the
+ // specified index is > FileIndex. Handle the leaf case first.
+ if (isLeaf()) {
+ if (!isFull()) {
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ if (i != e)
+ memmove(&Values[i+1], &Values[i], sizeof(Values[0])*(e-i));
+ Values[i] = SourceDelta::get(FileIndex, Delta);
+ ++NumValuesUsed;
+ return false;
+ }
+
+ // Otherwise, if this leaf is full, split the node at its median, insert
+ // the value into one of the children, and return the result.
+ assert(InsertRes && "No result location specified");
+ DoSplit(*InsertRes);
+
+ if (InsertRes->Split.FileLoc > FileIndex)
+ InsertRes->LHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/);
+ else
+ InsertRes->RHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/);
+ return true;
+ }
+
+ // Otherwise, this is an interior node. Send the request down the tree.
+ DeltaTreeInteriorNode *IN = cast<DeltaTreeInteriorNode>(this);
+ if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes))
+ return false; // If there was space in the child, just return.
+
+ // Okay, this split the subtree, producing a new value and two children to
+ // insert here. If this node is non-full, we can just insert it directly.
+ if (!isFull()) {
+ // Now that we have two nodes and a new element, insert the percolated value
+ // into this node by moving all the later values/children down, then inserting
+ // the new one.
+ if (i != e)
+ memmove(&IN->Children[i+2], &IN->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ IN->Children[i] = InsertRes->LHS;
+ IN->Children[i+1] = InsertRes->RHS;
+
+ if (e != i)
+ memmove(&Values[i+1], &Values[i], (e-i)*sizeof(Values[0]));
+ Values[i] = InsertRes->Split;
+ ++NumValuesUsed;
+ return false;
+ }
+
+ // Finally, if this interior node was full and a node is percolated up, split
+ // ourself and return that up the chain. Start by saving all our info to
+ // avoid having the split clobber it.
+ IN->Children[i] = InsertRes->LHS;
+ DeltaTreeNode *SubRHS = InsertRes->RHS;
+ SourceDelta SubSplit = InsertRes->Split;
+
+ // Do the split.
+ DoSplit(*InsertRes);
+
+ // Figure out where to insert SubRHS/SubSplit.
+ DeltaTreeInteriorNode *InsertSide;
+ if (SubSplit.FileLoc < InsertRes->Split.FileLoc)
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->LHS);
+ else
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->RHS);
+
+ // We now have a non-empty interior node 'InsertSide' to insert
+ // SubRHS/SubSplit into. Find out where to insert SubSplit.
+
+ // Find the insertion point, the first delta whose index is >SubSplit.FileLoc.
+ i = 0; e = InsertSide->getNumValuesUsed();
+ while (i != e && SubSplit.FileLoc > InsertSide->getValue(i).FileLoc)
+ ++i;
+
+ // Now we know that i is the place to insert the split value into. Insert it
+ // and the child right after it.
+ if (i != e)
+ memmove(&InsertSide->Children[i+2], &InsertSide->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ InsertSide->Children[i+1] = SubRHS;
+
+ if (e != i)
+ memmove(&InsertSide->Values[i+1], &InsertSide->Values[i],
+ (e-i)*sizeof(Values[0]));
+ InsertSide->Values[i] = SubSplit;
+ ++InsertSide->NumValuesUsed;
+ InsertSide->FullDelta += SubSplit.Delta + SubRHS->getFullDelta();
+ return true;
+}
+
+/// DoSplit - Split the currently full node (which has 2*WidthFactor-1 values)
+/// into two subtrees each with "WidthFactor-1" values and a pivot value.
+/// Return the pieces in InsertRes.
+void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
+ assert(isFull() && "Why split a non-full node?");
+
+ // Since this node is full, it contains 2*WidthFactor-1 values. We move
+ // the first 'WidthFactor-1' values to the LHS child (which we leave in this
+ // node), propagate one value up, and move the last 'WidthFactor-1' values
+ // into the RHS child.
+
+ // Create the new child node.
+ DeltaTreeNode *NewNode;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
+ // If this is an interior node, also move over 'WidthFactor' children
+ // into the new node.
+ DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode();
+ memcpy(&New->Children[0], &IN->Children[WidthFactor],
+ WidthFactor*sizeof(IN->Children[0]));
+ NewNode = New;
+ } else {
+ // Just create the new leaf node.
+ NewNode = new DeltaTreeNode();
+ }
+
+ // Move over the last 'WidthFactor-1' values from here to NewNode.
+ memcpy(&NewNode->Values[0], &Values[WidthFactor],
+ (WidthFactor-1)*sizeof(Values[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumValuesUsed = NumValuesUsed = WidthFactor-1;
+
+ // Recompute the two nodes' full delta.
+ NewNode->RecomputeFullDeltaLocally();
+ RecomputeFullDeltaLocally();
+
+ InsertRes.LHS = this;
+ InsertRes.RHS = NewNode;
+ InsertRes.Split = Values[WidthFactor-1];
+}
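+// Worked example (editorial note, not part of the original change): with
+// WidthFactor == 8 a full node holds 15 values. DoSplit leaves Values[0..6]
+// in this node, moves Values[8..14] into the new RHS node, and reports
+// Values[7] as the pivot in InsertRes.Split, so both halves end up holding
+// WidthFactor-1 == 7 values.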
+
+
+
+//===----------------------------------------------------------------------===//
+// DeltaTree Implementation
+//===----------------------------------------------------------------------===//
+
+//#define VERIFY_TREE
+
+#ifdef VERIFY_TREE
+/// VerifyTree - Walk the btree performing assertions on various properties to
+/// verify consistency. This is useful for debugging new changes to the tree.
+static void VerifyTree(const DeltaTreeNode *N) {
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(N);
+ if (IN == 0) {
+ // Verify leaves, just ensure that FullDelta matches up and the elements
+ // are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = N->getNumValuesUsed(); i != e; ++i) {
+ if (i)
+ assert(N->getValue(i-1).FileLoc < N->getValue(i).FileLoc);
+ FullDelta += N->getValue(i).Delta;
+ }
+ assert(FullDelta == N->getFullDelta());
+ return;
+ }
+
+ // Verify interior nodes: Ensure that FullDelta matches up and the
+ // elements are in proper order and the children are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = IN->getNumValuesUsed(); i != e; ++i) {
+ const SourceDelta &IVal = N->getValue(i);
+ const DeltaTreeNode *IChild = IN->getChild(i);
+ if (i)
+ assert(IN->getValue(i-1).FileLoc < IVal.FileLoc);
+ FullDelta += IVal.Delta;
+ FullDelta += IChild->getFullDelta();
+
+ // The largest value in child #i should be smaller than FileLoc.
+ assert(IChild->getValue(IChild->getNumValuesUsed()-1).FileLoc <
+ IVal.FileLoc);
+
+ // The smallest value in child #i+1 should be larger than FileLoc.
+ assert(IN->getChild(i+1)->getValue(0).FileLoc > IVal.FileLoc);
+ VerifyTree(IChild);
+ }
+
+ FullDelta += IN->getChild(IN->getNumValuesUsed())->getFullDelta();
+
+ assert(FullDelta == N->getFullDelta());
+}
+#endif // VERIFY_TREE
+
+static DeltaTreeNode *getRoot(void *Root) {
+ return (DeltaTreeNode*)Root;
+}
+
+DeltaTree::DeltaTree() {
+ Root = new DeltaTreeNode();
+}
+DeltaTree::DeltaTree(const DeltaTree &RHS) {
+ // Currently we only support copying when the RHS is empty.
+ assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 &&
+ "Can only copy empty tree");
+ Root = new DeltaTreeNode();
+}
+
+DeltaTree::~DeltaTree() {
+ getRoot(Root)->Destroy();
+}
+
+/// getDeltaAt - Return the accumulated delta at the specified file offset.
+/// This includes all insertions or deletions that occurred *before* the
+/// specified file index.
+int DeltaTree::getDeltaAt(unsigned FileIndex) const {
+ const DeltaTreeNode *Node = getRoot(Root);
+
+ int Result = 0;
+
+ // Walk down the tree.
+ while (1) {
+ // For all nodes, include any local deltas before the specified file
+ // index by summing them up directly. Keep track of how many were
+ // included.
+ unsigned NumValsGreater = 0;
+ for (unsigned e = Node->getNumValuesUsed(); NumValsGreater != e;
+ ++NumValsGreater) {
+ const SourceDelta &Val = Node->getValue(NumValsGreater);
+
+ if (Val.FileLoc >= FileIndex)
+ break;
+ Result += Val.Delta;
+ }
+
+ // If we have an interior node, include information about children and
+ // recurse. Otherwise, if we have a leaf, we're done.
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
+ if (!IN) return Result;
+
+ // Include any children to the left of the values we skipped, all of
+ // their deltas should be included as well.
+ for (unsigned i = 0; i != NumValsGreater; ++i)
+ Result += IN->getChild(i)->getFullDelta();
+
+ // If we found exactly the value we were looking for, break off the
+ // search early. There is no need to search the RHS of the value for
+ // partial results.
+ if (NumValsGreater != Node->getNumValuesUsed() &&
+ Node->getValue(NumValsGreater).FileLoc == FileIndex)
+ return Result+IN->getChild(NumValsGreater)->getFullDelta();
+
+ // Otherwise, traverse down the tree. The selected subtree may be
+ // partially included in the range.
+ Node = IN->getChild(NumValsGreater);
+ }
+ // NOT REACHED.
+}
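+// Worked example (editorial note, not part of the original change): querying
+// getDeltaAt(100) at an interior node holding values at FileLoc 40 and 120
+// adds the delta recorded at 40, adds the full delta of the subtree to the
+// left of 40, and then descends only into the child covering offsets between
+// 40 and 120; the subtree to the right of 120 is never visited.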
+
+/// AddDelta - When a change is made that shifts around the text buffer,
+/// this method is used to record that info. It inserts a delta of 'Delta'
+/// into the current DeltaTree at offset FileIndex.
+void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
+ assert(Delta && "Adding a noop?");
+ DeltaTreeNode *MyRoot = getRoot(Root);
+
+ InsertResult InsertRes;
+ if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) {
+ Root = MyRoot = new DeltaTreeInteriorNode(InsertRes);
+ }
+
+#ifdef VERIFY_TREE
+ VerifyTree(MyRoot);
+#endif
+}
+
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
new file mode 100644
index 0000000..69dd03a
--- /dev/null
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -0,0 +1,574 @@
+//== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLRewriter class, which is used to translate the
+// text of a source file into prettified HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+
+/// HighlightRange - Highlight a range in the source code with the specified
+/// start/end tags. B/E must be in the same file. This ensures that
+/// start/end tags are placed at the start/end of each line if the range is
+/// multiline.
+void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
+ const char *StartTag, const char *EndTag) {
+ SourceManager &SM = R.getSourceMgr();
+ B = SM.getInstantiationLoc(B);
+ E = SM.getInstantiationLoc(E);
+ FileID FID = SM.getFileID(B);
+ assert(SM.getFileID(E) == FID && "B/E not in the same file!");
+
+ unsigned BOffset = SM.getFileOffset(B);
+ unsigned EOffset = SM.getFileOffset(E);
+
+ // Include the whole end token in the range.
+ EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
+
+ HighlightRange(R.getEditBuffer(FID), BOffset, EOffset,
+ SM.getBufferData(FID).first, StartTag, EndTag);
+}
+
+/// HighlightRange - This is the same as the above method, but takes
+/// decomposed file locations.
+void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
+ const char *BufferStart,
+ const char *StartTag, const char *EndTag) {
+ // Insert the tag at the absolute start/end of the range.
+ RB.InsertTextAfter(B, StartTag, strlen(StartTag));
+ RB.InsertTextBefore(E, EndTag, strlen(EndTag));
+
+ // Scan the range to see if there is a \r or \n. If so, and if the line is
+ // not blank, insert tags on that line as well.
+ bool HadOpenTag = true;
+
+ unsigned LastNonWhiteSpace = B;
+ for (unsigned i = B; i != E; ++i) {
+ switch (BufferStart[i]) {
+ case '\r':
+ case '\n':
+ // Okay, we found a newline in the range. If we have an open tag, we need
+ // to insert a close tag at the first non-whitespace before the newline.
+ if (HadOpenTag)
+ RB.InsertTextBefore(LastNonWhiteSpace+1, EndTag, strlen(EndTag));
+
+ // Instead of inserting an open tag immediately after the newline, we
+ // wait until we see a non-whitespace character. This prevents us from
+ // inserting tags around blank lines, and also allows the open tag to
+ // be put *after* whitespace on a non-blank line.
+ HadOpenTag = false;
+ break;
+ case '\0':
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ // Ignore whitespace.
+ break;
+
+ default:
+ // If there is no tag open, do it now.
+ if (!HadOpenTag) {
+ RB.InsertTextAfter(i, StartTag, strlen(StartTag));
+ HadOpenTag = true;
+ }
+
+ // Remember this character.
+ LastNonWhiteSpace = i;
+ break;
+ }
+ }
+}
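+// Illustrative example (editorial sketch, not part of the original change):
+// highlighting the two-line range "foo(\n  bar);" with "<b>"/"</b>" yields
+// roughly
+//   <b>foo(</b>
+//     <b>bar);</b>
+// i.e. the tags are re-balanced on every line, the reopening tag is placed
+// after leading whitespace, and blank lines receive no tags at all.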
+
+void html::EscapeText(Rewriter &R, FileID FID,
+ bool EscapeSpaces, bool ReplaceTabs) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* C = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ assert (C <= FileEnd);
+
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ unsigned ColNo = 0;
+ for (unsigned FilePos = 0; C != FileEnd ; ++C, ++FilePos) {
+ switch (*C) {
+ default: ++ColNo; break;
+ case '\n':
+ case '\r':
+ ColNo = 0;
+ break;
+
+ case ' ':
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1, "&nbsp;", 6);
+ ++ColNo;
+ break;
+ case '\f':
+ RB.ReplaceText(FilePos, 1, "<hr>", 4);
+ ColNo = 0;
+ break;
+
+ case '\t': {
+ if (!ReplaceTabs)
+ break;
+ unsigned NumSpaces = 8-(ColNo&7);
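+ // Tab stops fall every 8 columns: e.g. a tab at ColNo == 3 expands to 5
+ // spaces, advancing the column to the next multiple of 8.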
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1, "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
+ "&nbsp;&nbsp;&nbsp;", 6*NumSpaces);
+ else
+ RB.ReplaceText(FilePos, 1, "        ", NumSpaces);
+ ColNo += NumSpaces;
+ break;
+ }
+ case '<':
+ RB.ReplaceText(FilePos, 1, "&lt;", 4);
+ ++ColNo;
+ break;
+
+ case '>':
+ RB.ReplaceText(FilePos, 1, "&gt;", 4);
+ ++ColNo;
+ break;
+
+ case '&':
+ RB.ReplaceText(FilePos, 1, "&amp;", 5);
+ ++ColNo;
+ break;
+ }
+ }
+}
+
+std::string html::EscapeText(const std::string& s, bool EscapeSpaces,
+ bool ReplaceTabs) {
+
+ unsigned len = s.size();
+ std::string Str;
+ llvm::raw_string_ostream os(Str);
+
+ for (unsigned i = 0 ; i < len; ++i) {
+
+ char c = s[i];
+ switch (c) {
+ default:
+ os << c; break;
+
+ case ' ':
+ if (EscapeSpaces) os << "&nbsp;";
+ else os << ' ';
+ break;
+
+ case '\t':
+ if (ReplaceTabs) {
+ if (EscapeSpaces)
+ for (unsigned i = 0; i < 4; ++i)
+ os << "&nbsp;";
+ else
+ for (unsigned i = 0; i < 4; ++i)
+ os << " ";
+ }
+ else
+ os << c;
+
+ break;
+
+ case '<': os << "&lt;"; break;
+ case '>': os << "&gt;"; break;
+ case '&': os << "&amp;"; break;
+ }
+ }
+
+ return os.str();
+}
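+// Illustrative example (editorial sketch, not part of the original change):
+//
+//   html::EscapeText("if (a < b && c > 0)", false, false)
+//
+// returns "if (a &lt; b &amp;&amp; c &gt; 0)"; spaces and tabs pass through
+// unchanged unless EscapeSpaces/ReplaceTabs are set.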
+
+static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
+ unsigned B, unsigned E) {
+ llvm::SmallString<100> Str;
+ Str += "<tr><td class=\"num\" id=\"LN";
+ Str.append_uint(LineNo);
+ Str += "\">";
+ Str.append_uint(LineNo);
+ Str += "</td><td class=\"line\">";
+
+ if (B == E) { // Handle empty lines.
+ Str += " </td></tr>";
+ RB.InsertTextBefore(B, &Str[0], Str.size());
+ } else {
+ RB.InsertTextBefore(B, &Str[0], Str.size());
+ RB.InsertTextBefore(E, "</td></tr>", strlen("</td></tr>"));
+ }
+}
+
+void html::AddLineNumbers(Rewriter& R, FileID FID) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileBeg = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+ const char* C = FileBeg;
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ assert (C <= FileEnd);
+
+ unsigned LineNo = 0;
+ unsigned FilePos = 0;
+
+ while (C != FileEnd) {
+
+ ++LineNo;
+ unsigned LineStartPos = FilePos;
+ unsigned LineEndPos = FileEnd - FileBeg;
+
+ assert (FilePos <= LineEndPos);
+ assert (C < FileEnd);
+
+ // Scan until the newline (or end-of-file).
+
+ while (C != FileEnd) {
+ char c = *C;
+ ++C;
+
+ if (c == '\n') {
+ LineEndPos = FilePos++;
+ break;
+ }
+
+ ++FilePos;
+ }
+
+ AddLineNumber(RB, LineNo, LineStartPos, LineEndPos);
+ }
+
+ // Add one big table tag that surrounds all of the code.
+ RB.InsertTextBefore(0, "<table class=\"code\">\n",
+ strlen("<table class=\"code\">\n"));
+
+ RB.InsertTextAfter(FileEnd - FileBeg, "</table>", strlen("</table>"));
+}
+
+void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
+ const char *title) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileStart = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID);
+ SourceLocation EndLoc = StartLoc.getFileLocWithOffset(FileEnd-FileStart);
+
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "<!doctype html>\n" // Use HTML 5 doctype
+ "<html>\n<head>\n";
+
+ if (title)
+ os << "<title>" << html::EscapeText(title) << "</title>\n";
+
+ os << "<style type=\"text/css\">\n"
+ " body { color:#000000; background-color:#ffffff }\n"
+ " body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
+ " h1 { font-size:14pt }\n"
+ " .code { border-collapse:collapse; width:100%; }\n"
+ " .code { font-family: \"Andale Mono\", monospace; font-size:10pt }\n"
+ " .code { line-height: 1.2em }\n"
+ " .comment { color: green; font-style: oblique }\n"
+ " .keyword { color: blue }\n"
+ " .string_literal { color: red }\n"
+ " .directive { color: darkmagenta }\n"
+ // Macro expansions.
+ " .expansion { display: none; }\n"
+ " .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
+ "padding: 2px; background-color:#FFF0F0; font-weight: normal; "
+ " -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
+ "position: absolute; top: -1em; left:10em; z-index: 1 } \n"
+ " .macro { color: darkmagenta; background-color:LemonChiffon;"
+ // Macros are position: relative to provide base for expansions.
+ " position: relative }\n"
+ " .num { width:2.5em; padding-right:2ex; background-color:#eeeeee }\n"
+ " .num { text-align:right; font-size:8pt }\n"
+ " .num { color:#444444 }\n"
+ " .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
+ " .line { white-space: pre }\n"
+ " .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
+ " .msg { -webkit-border-radius:5px }\n"
+ " .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
+ " .msg { float:left }\n"
+ " .msg { padding:0.25em 1ex 0.25em 1ex }\n"
+ " .msg { margin-top:10px; margin-bottom:10px }\n"
+ " .msg { font-weight:bold }\n"
+ " .msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }\n"
+ " .msgT { padding:0x; spacing:0x }\n"
+ " .msgEvent { background-color:#fff8b4; color:#000000 }\n"
+ " .msgControl { background-color:#bbbbbb; color:#000000 }\n"
+ " .mrange { background-color:#dfddf3 }\n"
+ " .mrange { border-bottom:1px solid #6F9DBE }\n"
+ " .PathIndex { font-weight: bold; padding:0px 5px 0px 5px; "
+ "margin-right:5px; }\n"
+ " .PathIndex { -webkit-border-radius:8px }\n"
+ " .PathIndexEvent { background-color:#bfba87 }\n"
+ " .PathIndexControl { background-color:#8c8c8c }\n"
+ " .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
+ " .CodeRemovalHint { background-color:#de1010 }\n"
+ " .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
+ " table.simpletable {\n"
+ " padding: 5px;\n"
+ " font-size:12pt;\n"
+ " margin:20px;\n"
+ " border-collapse: collapse; border-spacing: 0px;\n"
+ " }\n"
+ " td.rowname {\n"
+ " text-align:right; font-weight:bold; color:#444444;\n"
+ " padding-right:2ex; }\n"
+ "</style>\n</head>\n<body>";
+
+ // Generate header
+ R.InsertStrBefore(StartLoc, os.str());
+ // Generate footer
+
+ R.InsertCStrAfter(EndLoc, "</body></html>\n");
+}
+
+/// SyntaxHighlight - Relex the specified FileID in raw mode and annotate the
+/// HTML with spans for keywords, comments, string literals and preprocessor
+/// directives. Since the file is lexed raw, no macros are expanded and no
+/// #includes are entered, so the highlighting is purely lexical.
+void html::SyntaxHighlight(Rewriter &R, FileID FID, Preprocessor &PP) {
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ const SourceManager &SM = PP.getSourceManager();
+ Lexer L(FID, SM, PP.getLangOptions());
+ const char *BufferStart = L.getBufferStart();
+
+ // Inform the raw lexer that we want to retain comments as tokens, so we
+ // can highlight them.
+ L.SetCommentRetentionState(true);
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ while (Tok.isNot(tok::eof)) {
+ // Since we are lexing unexpanded tokens, all tokens are from the main
+ // FileID.
+ unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
+ unsigned TokLen = Tok.getLength();
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::identifier: {
+ // Fill in Result.IdentifierInfo, looking up the identifier in the
+ // identifier table.
+ IdentifierInfo *II = PP.LookUpIdentifierInfo(Tok, BufferStart+TokOffs);
+
+ // If this is a pp-identifier for a keyword, highlight it as such.
+ if (II->getTokenID() != tok::identifier)
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='keyword'>", "</span>");
+ break;
+ }
+ case tok::comment:
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='comment'>", "</span>");
+ break;
+ case tok::wide_string_literal:
+ // Chop off the L prefix
+ ++TokOffs;
+ --TokLen;
+ // FALL THROUGH.
+ case tok::string_literal:
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='string_literal'>", "</span>");
+ break;
+ case tok::hash: {
+ // If this is a preprocessor directive, all tokens to the end of the line are
+ // part of it.
+ if (!Tok.isAtStartOfLine())
+ break;
+
+ // Eat all of the tokens until we get to the next one at the start of
+ // line.
+ unsigned TokEnd = TokOffs+TokLen;
+ L.LexFromRawLexer(Tok);
+ while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
+ TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
+ L.LexFromRawLexer(Tok);
+ }
+
+ // Find end of line. This is a hack.
+ HighlightRange(RB, TokOffs, TokEnd, BufferStart,
+ "<span class='directive'>", "</span>");
+
+ // Don't skip the next token.
+ continue;
+ }
+ }
+
+ L.LexFromRawLexer(Tok);
+ }
+}
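+// Illustrative output (editorial sketch, not part of the original change):
+// for the source line "for (int i = 0; i < n; ++i) // count", the keywords
+// "for" and "int" become <span class='keyword'>...</span>, the trailing
+// comment becomes <span class='comment'>// count</span>, string literals get
+// the 'string_literal' class, and whole preprocessor lines get 'directive'.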
+
+namespace {
+/// IgnoringDiagClient - This is a diagnostic client that just ignores all
+/// diags.
+class IgnoringDiagClient : public DiagnosticClient {
+ void HandleDiagnostic(Diagnostic::Level DiagLevel,
+ const DiagnosticInfo &Info) {
+ // Just ignore it.
+ }
+};
+}
+
+/// HighlightMacros - This uses the macro table state from the end of the
+/// file, to re-expand macros and insert (into the HTML) information about the
+/// macro expansions. This won't be perfectly perfect, but it will be
+/// reasonably close.
+void html::HighlightMacros(Rewriter &R, FileID FID, Preprocessor& PP) {
+ // Re-lex the raw token stream into a token buffer.
+ const SourceManager &SM = PP.getSourceManager();
+ std::vector<Token> TokenStream;
+
+ Lexer L(FID, SM, PP.getLangOptions());
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ while (1) {
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ // If this is a # at the start of a line, discard it from the token stream.
+ // We don't want the re-preprocess step to see #defines, #includes or other
+ // preprocessor directives.
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
+ continue;
+
+ // If this is a ## token, change its kind to unknown so that re-preprocessing
+ // it will not produce an error.
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+
+ // If this raw token is an identifier, the raw lexer won't have looked up
+ // the corresponding identifier info for it. Do this now so that it will be
+ // macro expanded when we re-preprocess it.
+ if (Tok.is(tok::identifier)) {
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ Tok.setKind(PP.LookUpIdentifierInfo(Tok)->getTokenID());
+ }
+
+ TokenStream.push_back(Tok);
+
+ if (Tok.is(tok::eof)) break;
+ }
+
+ // Temporarily change the diagnostics object so that we ignore any generated
+ // diagnostics from this pass.
+ IgnoringDiagClient TmpDC;
+ Diagnostic TmpDiags(&TmpDC);
+
+ Diagnostic *OldDiags = &PP.getDiagnostics();
+ PP.setDiagnostics(TmpDiags);
+
+ // Inform the preprocessor that we don't want comments.
+ PP.SetCommentRetentionState(false, false);
+
+ // Enter the tokens we just lexed. This will cause them to be macro expanded
+ // but won't enter sub-files (because we removed #'s).
+ PP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
+
+ TokenConcatenation ConcatInfo(PP);
+
+ // Lex all the tokens.
+ Token Tok;
+ PP.Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ // Ignore non-macro tokens.
+ if (!Tok.getLocation().isMacroID()) {
+ PP.Lex(Tok);
+ continue;
+ }
+
+ // Okay, we have the first token of a macro expansion: highlight the
+ // instantiation by inserting a start tag before the macro instantiation and
+ // end tag after it.
+ std::pair<SourceLocation, SourceLocation> LLoc =
+ SM.getInstantiationRange(Tok.getLocation());
+
+ // Ignore tokens whose instantiation location was not the main file.
+ if (SM.getFileID(LLoc.first) != FID) {
+ PP.Lex(Tok);
+ continue;
+ }
+
+ assert(SM.getFileID(LLoc.second) == FID &&
+ "Start and end of expansion must be in the same ultimate file!");
+
+ std::string Expansion = PP.getSpelling(Tok);
+ unsigned LineLen = Expansion.size();
+
+ Token PrevTok = Tok;
+ // Okay, eat this token, getting the next one.
+ PP.Lex(Tok);
+
+ // Skip all the rest of the tokens that are part of this macro
+ // instantiation. It would be really nice to pop up a window with all the
+ // spelling of the tokens or something.
+ while (!Tok.is(tok::eof) &&
+ SM.getInstantiationLoc(Tok.getLocation()) == LLoc.first) {
+ // Insert a newline if the macro expansion is getting large.
+ if (LineLen > 60) {
+ Expansion += "<br>";
+ LineLen = 0;
+ }
+
+ LineLen -= Expansion.size();
+
+ // If the tokens were already space separated, or if they must be to avoid
+ // them being implicitly pasted, add a space between them.
+ if (Tok.hasLeadingSpace() ||
+ ConcatInfo.AvoidConcat(PrevTok, Tok))
+ Expansion += ' ';
+
+ // Escape any special characters in the token text.
+ Expansion += EscapeText(PP.getSpelling(Tok));
+ LineLen += Expansion.size();
+
+ PrevTok = Tok;
+ PP.Lex(Tok);
+ }
+
+
+ // Insert the expansion as the end tag, so that multi-line macros all get
+ // highlighted.
+ Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
+
+ HighlightRange(R, LLoc.first, LLoc.second,
+ "<span class='macro'>", Expansion.c_str());
+ }
+
+ // Restore diagnostics object back to its own thing.
+ PP.setDiagnostics(*OldDiags);
+}
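+// Illustrative result (editorial sketch, not part of the original change):
+// for a hypothetical "#define MIN(a,b) ((a)<(b)?(a):(b))", a use of MIN(x,y)
+// in the main file ends up wrapped roughly as
+//   <span class='macro'>MIN(x,y)<span class='expansion'>
+//     ( ( x ) &lt; ( y ) ? ( x ) : ( y ) )</span></span>
+// so hovering over the macro (see the CSS emitted above) reveals its
+// expansion.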
+
+void html::HighlightMacros(Rewriter &R, FileID FID,
+ PreprocessorFactory &PPF) {
+
+ llvm::OwningPtr<Preprocessor> PP(PPF.CreatePreprocessor());
+ HighlightMacros(R, FID, *PP);
+}
diff --git a/lib/Rewrite/Makefile b/lib/Rewrite/Makefile
new file mode 100644
index 0000000..61fdf40
--- /dev/null
+++ b/lib/Rewrite/Makefile
@@ -0,0 +1,22 @@
+##===- clang/lib/Rewrite/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements code transformation / rewriting facilities.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangRewrite
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp
new file mode 100644
index 0000000..61cb02b
--- /dev/null
+++ b/lib/Rewrite/RewriteRope.cpp
@@ -0,0 +1,807 @@
+//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RewriteRope class, which is a powerful string
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/RewriteRope.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+using namespace clang;
+using llvm::dyn_cast;
+using llvm::cast;
+
+/// RewriteRope is a "strong" string class, designed to make insertions and
+/// deletions in the middle of the string nearly constant time (really, they are
+/// O(log N), but with a very low constant factor).
+///
+/// The implementation of this datastructure is a conceptual linear sequence of
+/// RopePiece elements. Each RopePiece represents a view on a separately
+/// allocated and reference counted string. This means that splitting a very
+/// long string can be done in constant time by splitting a RopePiece that
+/// references the whole string into two rope pieces that reference each half.
+/// Once split, another string can be inserted in between the two halves by
+/// inserting a RopePiece in between the two others. All of this is very
+/// inexpensive: it takes time proportional to the number of RopePieces, not the
+/// length of the strings they represent.
+///
+/// While a linear sequence of RopePieces is the conceptual model, the actual
+/// implementation captures them in an adapted B+ Tree. Using a B+ tree (a
+/// tree that keeps the values in the leaves, where each node contains a
+/// reasonable number of pointers to children/values) allows us to
+/// maintain efficient operation when the RewriteRope contains a *huge* number
+/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find
+/// the RopePiece corresponding to some offset very efficiently, and it
+/// automatically balances itself on insertions of RopePieces (which can happen
+/// for both insertions and erases of string ranges).
+///
+/// The one wrinkle on the theory is that we don't attempt to keep the tree
+/// properly balanced when erases happen. Erases of string data can either
+/// insert new RopePieces (e.g. when the middle of some other rope piece is
+/// deleted, which results in two rope pieces, just like an insert) or reduce
+/// the number of RopePieces maintained by the B+Tree. In the case when
+/// the number of RopePieces is reduced, we don't attempt to maintain the
+/// standard 'invariant' that each node in the tree contains at least
+/// 'WidthFactor' children/values. For our use cases, this doesn't seem to
+/// matter.
+///
+/// The implementation below is primarily implemented in terms of three classes:
+/// RopePieceBTreeNode - Common base class for:
+///
+/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+/// nodes. This directly represents a chunk of the string with those
+/// RopePieces concatenated.
+/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
+/// up to '2*WidthFactor' other nodes in the tree.
+
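+// Illustrative picture (editorial sketch, not part of the original change):
+// inserting "XYZ" into the middle of a RopePiece covering "abcdef" first
+// splits that piece into pieces for "abc" and "def", then slips a new
+// RopePiece for "XYZ" in between them; none of the existing string data is
+// copied or moved.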
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and
+ /// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods
+ /// and a flag that determines which subclass the instance is. Also
+ /// important, this node knows the full extent of the node, including any
+ /// children that it has. This allows efficient skipping over entire subtrees
+ /// when looking for an offset in the BTree.
+ class RopePieceBTreeNode {
+ protected:
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+ /// 'WidthFactor' elements in it (either ropepieces or children), except
+ /// the root, which may have fewer, and may have at most 2*WidthFactor
+ /// elements.
+ enum { WidthFactor = 8 };
+
+ /// Size - This is the number of bytes of file this node (including any
+ /// potential children) covers.
+ unsigned Size;
+
+ /// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it
+ /// is an instance of RopePieceBTreeInterior.
+ bool IsLeaf;
+
+ RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {}
+ ~RopePieceBTreeNode() {}
+ public:
+
+ bool isLeaf() const { return IsLeaf; }
+ unsigned size() const { return Size; }
+
+ void Destroy();
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ static inline bool classof(const RopePieceBTreeNode *) { return true; }
+
+ };
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeLeaf Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+ /// nodes. This directly represents a chunk of the string with those
+ /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
+ /// instances of RopePiece) are stored in leaves like this. To make iteration
+ /// over the leaves efficient, they maintain a singly linked list through the
+ /// NextLeaf field. This allows the B+Tree forward iterator to be constant
+ /// time for all increments.
+ class RopePieceBTreeLeaf : public RopePieceBTreeNode {
+ /// NumPieces - This holds the number of rope pieces currently active in the
+ /// Pieces array.
+ unsigned char NumPieces;
+
+ /// Pieces - This tracks the file chunks currently in this leaf.
+ ///
+ RopePiece Pieces[2*WidthFactor];
+
+ /// NextLeaf - This is a pointer to the next leaf in the tree, allowing
+ /// efficient in-order forward iteration of the tree without traversal.
+ RopePieceBTreeLeaf **PrevLeaf, *NextLeaf;
+ public:
+ RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0),
+ PrevLeaf(0), NextLeaf(0) {}
+ ~RopePieceBTreeLeaf() {
+ if (PrevLeaf || NextLeaf)
+ removeFromLeafInOrder();
+ }
+
+ bool isFull() const { return NumPieces == 2*WidthFactor; }
+
+ /// clear - Remove all rope pieces from this leaf.
+ void clear() {
+ while (NumPieces)
+ Pieces[--NumPieces] = RopePiece();
+ Size = 0;
+ }
+
+ unsigned getNumPieces() const { return NumPieces; }
+
+ const RopePiece &getPiece(unsigned i) const {
+ assert(i < getNumPieces() && "Invalid piece ID");
+ return Pieces[i];
+ }
+
+ const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; }
+ void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) {
+ assert(PrevLeaf == 0 && NextLeaf == 0 && "Already in ordering");
+
+ NextLeaf = Node->NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = &NextLeaf;
+ PrevLeaf = &Node->NextLeaf;
+ Node->NextLeaf = this;
+ }
+
+ void removeFromLeafInOrder() {
+ if (PrevLeaf) {
+ *PrevLeaf = NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = PrevLeaf;
+ } else if (NextLeaf) {
+ NextLeaf->PrevLeaf = 0;
+ }
+ }
+
+ /// FullRecomputeSizeLocally - This method recomputes the 'Size' field by
+ /// summing the size of all RopePieces.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumPieces(); i != e; ++i)
+ Size += getPiece(i).size();
+ }
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ static inline bool classof(const RopePieceBTreeLeaf *) { return true; }
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ if (Offset == 0 || Offset == size()) {
+ // Fastpath for a common case. There is already a splitpoint at the end.
+ return 0;
+ }
+
+ // Find the piece that this offset lands in.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ while (Offset >= PieceOffs+Pieces[i].size()) {
+ PieceOffs += Pieces[i].size();
+ ++i;
+ }
+
+ // If there is already a split point at the specified offset, just return
+ // success.
+ if (PieceOffs == Offset)
+ return 0;
+
+ // Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset
+ // to being Piece relative.
+ unsigned IntraPieceOffset = Offset-PieceOffs;
+
+ // We do this by shrinking the RopePiece and then doing an insert of the tail.
+ RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset,
+ Pieces[i].EndOffs);
+ Size -= Pieces[i].size();
+ Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset;
+ Size += Pieces[i].size();
+
+ return insert(Offset, Tail);
+}
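+// Worked example (editorial note, not part of the original change): if piece
+// i covers bytes [S, S+100) of its backing string and the split point falls
+// 40 bytes into it, the piece is shrunk to [S, S+40) and a new Tail piece for
+// [S+40, S+100) is inserted immediately after it; the leaf's total Size is
+// unchanged.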
+
+
+/// insert - Insert the specified RopePiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset,
+ const RopePiece &R) {
+ // If this node is not full, insert the piece.
+ if (!isFull()) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumPieces();
+ if (Offset == size()) {
+ // Fastpath for a common case.
+ i = e;
+ } else {
+ unsigned SlotOffs = 0;
+ for (; Offset > SlotOffs; ++i)
+ SlotOffs += getPiece(i).size();
+ assert(SlotOffs == Offset && "Split didn't occur before insertion!");
+ }
+
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ for (; i != e; --e)
+ Pieces[e] = Pieces[e-1];
+ Pieces[i] = R;
+ ++NumPieces;
+ Size += R.size();
+ return 0;
+ }
+
+ // Otherwise, if this leaf is full, split it in two halves. Since this
+ // node is full, it contains 2*WidthFactor values. We move the first
+ // 'WidthFactor' values to the LHS child (which we leave in this node) and
+ // move the last 'WidthFactor' values into the RHS child.
+
+ // Create the new node.
+ RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor],
+ &NewNode->Pieces[0]);
+ // Replace old pieces with null RopePieces to drop refcounts.
+ std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece());
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumPieces = NumPieces = WidthFactor;
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+
+ // Update the list of leaves.
+ NewNode->insertAfterLeafInOrder(this);
+
+ // These insertions can't fail.
+ if (this->size() >= Offset)
+ this->insert(Offset, R);
+ else
+ NewNode->insert(Offset - this->size(), R);
+ return NewNode;
+}
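+// Worked example (editorial note, not part of the original change): a full
+// leaf holds 2*WidthFactor == 16 pieces. The split keeps Pieces[0..7] here,
+// moves Pieces[8..15] into NewNode, links NewNode after this leaf in the
+// in-order leaf list, and then redoes the insertion in whichever half
+// contains Offset.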
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) {
+ // Since we are guaranteed that there is a split at Offset, we start by
+ // finding the Piece that starts there.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ for (; Offset > PieceOffs; ++i)
+ PieceOffs += getPiece(i).size();
+ assert(PieceOffs == Offset && "Split didn't occur before erase!");
+
+ unsigned StartPiece = i;
+
+ // Figure out how many pieces completely cover 'NumBytes'. We want to remove
+ // all of them.
+ for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i)
+ PieceOffs += getPiece(i).size();
+
+ // If we exactly include the last one, include it in the region to delete.
+ if (Offset+NumBytes == PieceOffs+getPiece(i).size())
+ PieceOffs += getPiece(i).size(), ++i;
+
+ // If we completely cover some RopePieces, erase them now.
+ if (i != StartPiece) {
+ unsigned NumDeleted = i-StartPiece;
+ for (; i != getNumPieces(); ++i)
+ Pieces[i-NumDeleted] = Pieces[i];
+
+ // Drop references to dead rope pieces.
+ std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()],
+ RopePiece());
+ NumPieces -= NumDeleted;
+
+ unsigned CoverBytes = PieceOffs-Offset;
+ NumBytes -= CoverBytes;
+ Size -= CoverBytes;
+ }
+
+ // If we completely removed some stuff, we could be done.
+ if (NumBytes == 0) return;
+
+ // Okay, now might be erasing part of some Piece. If this is the case, then
+ // move the start point of the piece.
+ assert(getPiece(StartPiece).size() > NumBytes);
+ Pieces[StartPiece].StartOffs += NumBytes;
+
+ // The size of this node just shrunk by NumBytes.
+ Size -= NumBytes;
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeInterior Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeInterior - This represents an interior node in the B+Tree,
+ /// which holds up to 2*WidthFactor pointers to child nodes.
+ class RopePieceBTreeInterior : public RopePieceBTreeNode {
+ /// NumChildren - This holds the number of children currently active in the
+ /// Children array.
+ unsigned char NumChildren;
+ RopePieceBTreeNode *Children[2*WidthFactor];
+ public:
+ RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {}
+
+ RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS)
+ : RopePieceBTreeNode(false) {
+ Children[0] = LHS;
+ Children[1] = RHS;
+ NumChildren = 2;
+ Size = LHS->size() + RHS->size();
+ }
+
+ bool isFull() const { return NumChildren == 2*WidthFactor; }
+
+ unsigned getNumChildren() const { return NumChildren; }
+ const RopePieceBTreeNode *getChild(unsigned i) const {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+ RopePieceBTreeNode *getChild(unsigned i) {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+
+ /// FullRecomputeSizeLocally - Recompute the Size field of this node by
+ /// summing up the sizes of the child nodes.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ Size += getChild(i)->size();
+ }
+
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// HandleChildPiece - A child propagated an insertion result up to us.
+ /// Insert the new child, and/or propagate the result further up the tree.
+ RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ static inline bool classof(const RopePieceBTreeInterior *) { return true; }
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return !N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) {
+ // Figure out which child to split.
+ if (Offset == 0 || Offset == size())
+ return 0; // If we have an exact offset, we're already split.
+
+ unsigned ChildOffset = 0;
+ unsigned i = 0;
+ for (; Offset >= ChildOffset+getChild(i)->size(); ++i)
+ ChildOffset += getChild(i)->size();
+
+ // If already split there, we're done.
+ if (ChildOffset == Offset)
+ return 0;
+
+ // Otherwise, recursively split the child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset))
+ return HandleChildPiece(i, RHS);
+ return 0; // Done!
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset,
+ const RopePiece &R) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumChildren();
+
+ unsigned ChildOffs = 0;
+ if (Offset == size()) {
+ // Fastpath for a common case. Insert at end of last child.
+ i = e-1;
+ ChildOffs = size()-getChild(i)->size();
+ } else {
+ for (; Offset > ChildOffs+getChild(i)->size(); ++i)
+ ChildOffs += getChild(i)->size();
+ }
+
+ Size += R.size();
+
+ // Insert at the end of this child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R))
+ return HandleChildPiece(i, RHS);
+
+ return 0;
+}
+
+/// HandleChildPiece - A child propagated an insertion result up to us.
+/// Insert the new child, and/or propagate the result further up the tree.
+RopePieceBTreeNode *
+RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) {
+ // Otherwise the child propagated a subtree up to us as a new child. See if
+ // we have space for it here.
+ if (!isFull()) {
+ // Insert RHS after child 'i'.
+ if (i + 1 != getNumChildren())
+ memmove(&Children[i+2], &Children[i+1],
+ (getNumChildren()-i-1)*sizeof(Children[0]));
+ Children[i+1] = RHS;
+ ++NumChildren;
+ return 0;
+ }
+
+ // Okay, this node is full. Split it in half, moving WidthFactor children to
+ // a newly allocated interior node.
+
+ // Create the new node.
+ RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ memcpy(&NewNode->Children[0], &Children[WidthFactor],
+ WidthFactor*sizeof(Children[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumChildren = NumChildren = WidthFactor;
+
+ // Finally, insert the new child into the side that can (now) hold it.
+ // These insertions can't fail.
+ if (i < WidthFactor)
+ this->HandleChildPiece(i, RHS);
+ else
+ NewNode->HandleChildPiece(i-WidthFactor, RHS);
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+ return NewNode;
+}
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) {
+ // This will shrink this node by NumBytes.
+ Size -= NumBytes;
+
+ // Find the first child that overlaps with Offset.
+ unsigned i = 0;
+ for (; Offset >= getChild(i)->size(); ++i)
+ Offset -= getChild(i)->size();
+
+ // Propagate the delete request into overlapping children, or completely
+ // delete the children as appropriate.
+ while (NumBytes) {
+ RopePieceBTreeNode *CurChild = getChild(i);
+
+ // If we are deleting something contained entirely in the child, pass on the
+ // request.
+ if (Offset+NumBytes < CurChild->size()) {
+ CurChild->erase(Offset, NumBytes);
+ return;
+ }
+
+ // If this deletion request starts somewhere in the middle of the child, it
+ // must be deleting to the end of the child.
+ if (Offset) {
+ unsigned BytesFromChild = CurChild->size()-Offset;
+ CurChild->erase(Offset, BytesFromChild);
+ NumBytes -= BytesFromChild;
+ // Start at the beginning of the next child.
+ Offset = 0;
+ ++i;
+ continue;
+ }
+
+ // If the deletion request completely covers the child, delete it and move
+ // the rest down.
+ NumBytes -= CurChild->size();
+ CurChild->Destroy();
+ --NumChildren;
+ if (i != getNumChildren())
+ memmove(&Children[i], &Children[i+1],
+ (getNumChildren()-i)*sizeof(Children[0]));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Implementation
+//===----------------------------------------------------------------------===//
+
+void RopePieceBTreeNode::Destroy() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ delete Leaf;
+ else
+ delete cast<RopePieceBTreeInterior>(this);
+}
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
+ assert(Offset <= size() && "Invalid offset to split!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->split(Offset);
+ return cast<RopePieceBTreeInterior>(this)->split(Offset);
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
+ const RopePiece &R) {
+ assert(Offset <= size() && "Invalid offset to insert!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->insert(Offset, R);
+ return cast<RopePieceBTreeInterior>(this)->insert(Offset, R);
+}
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) {
+ assert(Offset+NumBytes <= size() && "Invalid offset to erase!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->erase(Offset, NumBytes);
+ return cast<RopePieceBTreeInterior>(this)->erase(Offset, NumBytes);
+}
+
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeIterator Implementation
+//===----------------------------------------------------------------------===//
+
+static const RopePieceBTreeLeaf *getCN(const void *P) {
+ return static_cast<const RopePieceBTreeLeaf*>(P);
+}
+
+// begin iterator.
+RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) {
+ const RopePieceBTreeNode *N = static_cast<const RopePieceBTreeNode*>(n);
+
+ // Walk down the left side of the tree until we get to a leaf.
+ while (const RopePieceBTreeInterior *IN = dyn_cast<RopePieceBTreeInterior>(N))
+ N = IN->getChild(0);
+
+ // We must have at least one leaf.
+ CurNode = cast<RopePieceBTreeLeaf>(N);
+
+ // If we found a leaf that happens to be empty, skip over it until we get
+ // to something full.
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0)
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+
+ if (CurNode != 0)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Empty tree, this is an end() iterator.
+ CurPiece = 0;
+ CurChar = 0;
+}
+
+void RopePieceBTreeIterator::MoveToNextPiece() {
+ if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) {
+ CurChar = 0;
+ ++CurPiece;
+ return;
+ }
+
+ // Find the next non-empty leaf node.
+ do
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0);
+
+ if (CurNode != 0)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Hit end().
+ CurPiece = 0;
+ CurChar = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTree Implementation
+//===----------------------------------------------------------------------===//
+
+static RopePieceBTreeNode *getRoot(void *P) {
+ return static_cast<RopePieceBTreeNode*>(P);
+}
+
+RopePieceBTree::RopePieceBTree() {
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) {
+ assert(RHS.empty() && "Can't copy non-empty tree yet");
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::~RopePieceBTree() {
+ getRoot(Root)->Destroy();
+}
+
+unsigned RopePieceBTree::size() const {
+ return getRoot(Root)->size();
+}
+
+void RopePieceBTree::clear() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
+ Leaf->clear();
+ else {
+ getRoot(Root)->Destroy();
+ Root = new RopePieceBTreeLeaf();
+ }
+}
+
+void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the insertion.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+}
+
+void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the erasing.
+ getRoot(Root)->erase(Offset, NumBytes);
+}
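The two mutators directly above share one shape: first split the tree at Offset so a piece boundary exists there, then perform the operation, and whenever a node reports overflow by returning a freshly created sibling, grow the tree by one level with a new interior root. The standalone sketch below models only that overflow/root-growing shape with hypothetical Leaf/Interior/Tree stand-ins; a real interior node would splice the returned sibling into its own child array and only propagate when it is itself full, but here it simply forwards the overflow to keep the sketch tiny.

#include <cstdio>

// Hypothetical stand-ins for RopePieceBTreeLeaf/Interior: each leaf holds at
// most two "pieces" and reports overflow by returning a freshly made sibling.
struct Node {
  virtual Node *insert() = 0; // returns an overflow sibling, or nullptr
  virtual ~Node() {}
};

struct Leaf : Node {
  int NumPieces = 0;
  Node *insert() override {
    if (NumPieces < 2) { ++NumPieces; return nullptr; }
    Leaf *Sibling = new Leaf(); // no room: the new piece lands in a sibling
    Sibling->NumPieces = 1;
    return Sibling;             // the caller must splice this in somewhere
  }
};

struct Interior : Node {
  Node *L, *R;
  Interior(Node *l, Node *r) : L(l), R(r) {}
  // Simplification: forward the overflow instead of splicing it in here.
  Node *insert() override { return R->insert(); }
  ~Interior() { delete L; delete R; }
};

struct Tree {
  Node *Root = new Leaf();
  void insert() {
    // Same shape as RopePieceBTree::insert above: when the root reports
    // overflow, the tree gains exactly one level.
    if (Node *RHS = Root->insert())
      Root = new Interior(Root, RHS);
  }
  ~Tree() { delete Root; }
};

int main() {
  Tree T;
  for (int i = 0; i != 5; ++i)
    T.insert();
  std::puts("the root grew each time the rightmost leaf overflowed");
}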
+
+//===----------------------------------------------------------------------===//
+// RewriteRope Implementation
+//===----------------------------------------------------------------------===//
+
+/// MakeRopeString - This copies the specified byte range into some instance of
+/// RopeRefCountString, and returns a RopePiece that represents it. This uses
+/// the AllocBuffer object to aggregate requests for small strings into one
+/// allocation instead of doing tons of tiny allocations.
+RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
+ unsigned Len = End-Start;
+ assert(Len && "Zero length RopePiece is invalid!");
+
+ // If we have space for this string in the current alloc buffer, use it.
+ if (AllocOffs+Len <= AllocChunkSize) {
+ memcpy(AllocBuffer->Data+AllocOffs, Start, Len);
+ AllocOffs += Len;
+ return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs);
+ }
+
+ // If we don't have enough room because this specific allocation is huge,
+ // just allocate a new rope piece for it alone.
+ if (Len > AllocChunkSize) {
+ unsigned Size = End-Start+sizeof(RopeRefCountString)-1;
+ RopeRefCountString *Res =
+ reinterpret_cast<RopeRefCountString *>(new char[Size]);
+ Res->RefCount = 0;
+ memcpy(Res->Data, Start, End-Start);
+ return RopePiece(Res, 0, End-Start);
+ }
+
+ // Otherwise, this was a small request but we just don't have space for it.
+ // Make a new chunk and share it with later allocations.
+
+ // If we had an old allocation, drop our reference to it.
+ if (AllocBuffer && --AllocBuffer->RefCount == 0)
+ delete [] (char*)AllocBuffer;
+
+ unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
+ AllocBuffer = reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
+ AllocBuffer->RefCount = 0;
+ memcpy(AllocBuffer->Data, Start, Len);
+ AllocOffs = Len;
+
+ // Start out the new allocation with a refcount of 1, since we have an
+ // internal reference to it.
+ AllocBuffer->addRef();
+ return RopePiece(AllocBuffer, 0, Len);
+}
+
+
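MakeRopeString's chunk sharing comes down to offset arithmetic: a small request is appended to the current shared RopeRefCountString at AllocOffs, and the returned RopePiece records the [Start, End) byte range within that chunk. Below is a miniature of that policy under stated assumptions: MiniAllocator is hypothetical, a std::string stands in for the chunk, and the refcounting and oversized-request paths are reduced to comments.

#include <cassert>
#include <string>
#include <utility>

// Hypothetical miniature of the chunk-sharing policy in MakeRopeString: small
// byte ranges are packed into one shared chunk, and each request is described
// by [Start, End) offsets into that chunk, like a RopePiece.
struct MiniAllocator {
  enum { ChunkSize = 4096 };
  std::string Chunk; // stands in for RopeRefCountString::Data

  std::pair<unsigned, unsigned> make(const char *S, unsigned Len) {
    if (Len > ChunkSize)
      return std::make_pair(0u, Len); // oversized: the real code gives it a
                                      // dedicated RopeRefCountString
    if (Chunk.size() + Len > ChunkSize)
      Chunk.clear();                  // the real code starts a fresh chunk
    unsigned Start = (unsigned)Chunk.size();
    Chunk.append(S, Len);
    return std::make_pair(Start, Start + Len);
  }
};

int main() {
  MiniAllocator A;
  std::pair<unsigned, unsigned> P1 = A.make("hello", 5);  // lands at [0,5)
  std::pair<unsigned, unsigned> P2 = A.make(" world", 6); // packed at [5,11)
  assert(P1.second == P2.first && P2.second == 11);
  (void)P1; (void)P2;
}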
diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp
new file mode 100644
index 0000000..d81c38d
--- /dev/null
+++ b/lib/Rewrite/Rewriter.cpp
@@ -0,0 +1,228 @@
+//===--- Rewriter.cpp - Code rewriting interface --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Rewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Decl.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size) {
+ // Nothing to remove, exit early.
+ if (Size == 0) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ assert(RealOffset+Size < Buffer.size() && "Invalid location");
+
+ // Remove the dead characters.
+ Buffer.erase(RealOffset, Size);
+
+ // Add a delta so that future changes are offset correctly.
+ AddReplaceDelta(OrigOffset, -Size);
+}
+
+void RewriteBuffer::InsertText(unsigned OrigOffset,
+ const char *StrData, unsigned StrLen,
+ bool InsertAfter) {
+
+ // Nothing to insert, exit early.
+ if (StrLen == 0) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter);
+ Buffer.insert(RealOffset, StrData, StrData+StrLen);
+
+ // Add a delta so that future changes are offset correctly.
+ AddInsertDelta(OrigOffset, StrLen);
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove+insert"
+/// operation.
+void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
+ const char *NewStr, unsigned NewLength) {
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ Buffer.erase(RealOffset, OrigLength);
+ Buffer.insert(RealOffset, NewStr, NewStr+NewLength);
+ if (OrigLength != NewLength)
+ AddReplaceDelta(OrigOffset, NewLength-OrigLength);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Rewriter class
+//===----------------------------------------------------------------------===//
+
+/// getRangeSize - Return the size in bytes of the specified range if its
+/// endpoints are in the same file. If not, this returns -1.
+int Rewriter::getRangeSize(SourceRange Range) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd())) return -1;
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return -1;
+
+ // If edits have been made to this buffer, the offsets of the range
+ // endpoints may have shifted.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I != RewriteBuffers.end()) {
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, true);
+ StartOff = RB.getMappedOffset(StartOff);
+ }
+
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ return EndOff-StartOff;
+}
+
+/// getRewritenText - Return the rewritten form of the text in the specified
+/// range. If the start or end of the range was unrewritable or if they are
+/// in different buffers, this returns an empty string.
+///
+/// Note that this method is not particularly efficient.
+///
+std::string Rewriter::getRewritenText(SourceRange Range) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd()))
+ return "";
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return ""; // Start and end in different buffers.
+
+ // If edits have been made to this buffer, the offsets of the range
+ // endpoints may have shifted.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I == RewriteBuffers.end()) {
+ // If the buffer hasn't been rewritten, just return the text from the input.
+ const char *Ptr = SourceMgr->getCharacterData(Range.getBegin());
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ return std::string(Ptr, Ptr+EndOff-StartOff);
+ }
+
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, true);
+ StartOff = RB.getMappedOffset(StartOff);
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ // Advance the iterators to the right spot, yay for linear time algorithms.
+ RewriteBuffer::iterator Start = RB.begin();
+ std::advance(Start, StartOff);
+ RewriteBuffer::iterator End = Start;
+ std::advance(End, EndOff-StartOff);
+
+ return std::string(Start, End);
+}
+
+unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc,
+ FileID &FID) const {
+ assert(Loc.isValid() && "Invalid location");
+ std::pair<FileID,unsigned> V = SourceMgr->getDecomposedLoc(Loc);
+ FID = V.first;
+ return V.second;
+}
+
+
+/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID.
+///
+RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
+ std::map<FileID, RewriteBuffer>::iterator I =
+ RewriteBuffers.lower_bound(FID);
+ if (I != RewriteBuffers.end() && I->first == FID)
+ return I->second;
+ I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer()));
+
+ std::pair<const char*, const char*> MB = SourceMgr->getBufferData(FID);
+ I->second.Initialize(MB.first, MB.second);
+
+ return I->second;
+}
+
+/// InsertText - Insert the specified string at the specified location in the
+/// original buffer.
+bool Rewriter::InsertText(SourceLocation Loc, const char *StrData,
+ unsigned StrLen, bool InsertAfter) {
+ if (!isRewritable(Loc)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+ getEditBuffer(FID).InsertText(StartOffs, StrData, StrLen, InsertAfter);
+ return false;
+}
+
+/// RemoveText - Remove the specified text region.
+bool Rewriter::RemoveText(SourceLocation Start, unsigned Length) {
+ if (!isRewritable(Start)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, FID);
+ getEditBuffer(FID).RemoveText(StartOffs, Length);
+ return false;
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove/insert"
+/// operation.
+bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength,
+ const char *NewStr, unsigned NewLength) {
+ if (!isRewritable(Start)) return true;
+ FileID StartFileID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID);
+
+ getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength,
+ NewStr, NewLength);
+ return false;
+}
+
+/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty
+/// printer to generate the replacement code. This returns true if the input
+/// could not be rewritten, or false if successful.
+bool Rewriter::ReplaceStmt(Stmt *From, Stmt *To) {
+ // Measure the old text.
+ int Size = getRangeSize(From->getSourceRange());
+ if (Size == -1)
+ return true;
+
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ To->printPretty(S);
+ const std::string &Str = S.str();
+
+ ReplaceText(From->getLocStart(), Size, &Str[0], Str.size());
+ return false;
+}
+
+
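The RewriteBuffer methods above always take offsets into the original, unrewritten file; getMappedOffset plus the Add*Delta calls are what translate those into positions in the edited buffer. Here is a worked, self-contained sketch of that idea using a hypothetical MiniRewriteBuffer with a naive delta map in place of the real delta bookkeeping, and ignoring the InsertAfter flag.

#include <cassert>
#include <map>
#include <string>

// Hypothetical stand-in for RewriteBuffer's bookkeeping: Deltas maps an
// original offset to the net number of bytes inserted (positive) or removed
// (negative) there, so later edits can still be phrased in original offsets.
struct MiniRewriteBuffer {
  std::string Text;               // the current, rewritten text
  std::map<unsigned, int> Deltas; // original offset -> size change

  unsigned mapOffset(unsigned OrigOffset) const {
    int Shift = 0;
    for (std::map<unsigned, int>::const_iterator I = Deltas.begin(),
                                                 E = Deltas.end();
         I != E; ++I)
      if (I->first <= OrigOffset)
        Shift += I->second;
    return OrigOffset + Shift;
  }

  void insertText(unsigned OrigOffset, const std::string &S) {
    Text.insert(mapOffset(OrigOffset), S);
    Deltas[OrigOffset] += (int)S.size(); // later edits shift right
  }

  void replaceText(unsigned OrigOffset, unsigned OrigLen,
                   const std::string &S) {
    unsigned Real = mapOffset(OrigOffset);
    Text.erase(Real, OrigLen);
    Text.insert(Real, S);
    Deltas[OrigOffset] += (int)S.size() - (int)OrigLen;
  }
};

int main() {
  MiniRewriteBuffer RB;
  RB.Text = "int foo(int x) { return x; }";
  RB.insertText(4, "my_"); // rename: "int my_foo(int x) { return x; }"
  // The second edit still uses offset 24, where 'x' sat in the *original*
  // text; the +3 delta from the first edit maps it to the right place.
  RB.replaceText(24, 1, "x + 1");
  assert(RB.Text == "int my_foo(int x) { return x + 1; }");
}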
diff --git a/lib/Rewrite/TokenRewriter.cpp b/lib/Rewrite/TokenRewriter.cpp
new file mode 100644
index 0000000..e17e801
--- /dev/null
+++ b/lib/Rewrite/TokenRewriter.cpp
@@ -0,0 +1,98 @@
+//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenRewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/TokenRewriter.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
+ const LangOptions &LangOpts) {
+ ScratchBuf.reset(new ScratchBuffer(SM));
+
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ Lexer RawLex(FID, SM, LangOpts);
+
+ // Return all comments and whitespace as tokens.
+ RawLex.SetKeepWhitespaceMode(true);
+
+ // Lex the file, populating our datastructures.
+ Token RawTok;
+ RawLex.LexFromRawLexer(RawTok);
+ while (RawTok.isNot(tok::eof)) {
+#if 0
+ if (Tok.is(tok::identifier)) {
+ // Look up the identifier info for the token. This should use
+ // IdentifierTable directly instead of PP.
+ Tok.setIdentifierInfo(PP.LookUpIdentifierInfo(Tok));
+ }
+#endif
+
+ AddToken(RawTok, TokenList.end());
+ RawLex.LexFromRawLexer(RawTok);
+ }
+}
+
+TokenRewriter::~TokenRewriter() {
+}
+
+
+/// RemapIterator - Convert from token_iterator (a const iterator) to
+/// TokenRefTy (a non-const iterator).
+TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) {
+ if (I == token_end()) return TokenList.end();
+
+ // FIXME: This is horrible, we should use our own list or something to avoid
+ // this.
+ std::map<SourceLocation, TokenRefTy>::iterator MapIt =
+ TokenAtLoc.find(I->getLocation());
+ assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?");
+ return MapIt->second;
+}
+
+
+/// AddToken - Add the specified token into the Rewriter before the other
+/// position.
+TokenRewriter::TokenRefTy
+TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
+ Where = TokenList.insert(Where, T);
+
+ bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(),
+ Where)).second;
+ assert(InsertSuccess && "Token location already in rewriter!");
+ InsertSuccess = InsertSuccess; // reference it again so -Asserts builds don't warn
+ return Where;
+}
+
+
+TokenRewriter::token_iterator
+TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
+ unsigned Len = strlen(Val);
+
+ // Plop the string into the scratch buffer, then create a token for this
+ // string.
+ Token Tok;
+ Tok.startToken();
+ const char *Spelling;
+ Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling));
+ Tok.setLength(Len);
+
+ // TODO: Form a whole lexer around this and relex the token! For now, just
+ // set kind to tok::unknown.
+ Tok.setKind(tok::unknown);
+
+ return AddToken(Tok, RemapIterator(I));
+}
+
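RemapIterator relies on a token's SourceLocation uniquely identifying its slot in TokenList, so the TokenAtLoc map can turn a const token_iterator back into the mutable list iterator needed for insertion. The following simplified, self-contained model of that trick uses plain ints in place of both Tokens and SourceLocations; MiniTokenRewriter is hypothetical.

#include <cassert>
#include <list>
#include <map>

// ints stand in for both tokens and their source locations.
typedef std::list<int> TokenList;
typedef TokenList::iterator TokenRef;

struct MiniTokenRewriter {
  TokenList Tokens;                   // plays the role of TokenList above
  std::map<int, TokenRef> TokenAtLoc; // location -> mutable slot in the list

  TokenRef addToken(int Loc, TokenRef Where) {
    Where = Tokens.insert(Where, Loc);
    bool Inserted = TokenAtLoc.insert(std::make_pair(Loc, Where)).second;
    assert(Inserted && "token location already present");
    (void)Inserted; // keep -Asserts builds quiet, like the code above
    return Where;
  }

  // The RemapIterator trick: recover the non-const iterator for a token we
  // only hold const access to, by looking up its "location".
  TokenRef remap(TokenList::const_iterator I) {
    std::map<int, TokenRef>::iterator MapIt = TokenAtLoc.find(*I);
    assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?");
    return MapIt->second;
  }
};

int main() {
  MiniTokenRewriter TR;
  TR.addToken(10, TR.Tokens.end());
  TR.addToken(20, TR.Tokens.end());

  TokenList::const_iterator C = TR.Tokens.begin();
  ++C;                          // const view of the token at location 20
  TR.addToken(15, TR.remap(C)); // insert a new token before it

  assert((TR.Tokens == TokenList{10, 15, 20}));
}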
diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt
new file mode 100644
index 0000000..321dac1
--- /dev/null
+++ b/lib/Sema/CMakeLists.txt
@@ -0,0 +1,33 @@
+set(LLVM_NO_RTTI 1)
+
+add_clang_library(clangSema
+ IdentifierResolver.cpp
+ JumpDiagnostics.cpp
+ ParseAST.cpp
+ Sema.cpp
+ SemaAccess.cpp
+ SemaAttr.cpp
+ SemaChecking.cpp
+ SemaCXXScopeSpec.cpp
+ SemaDeclAttr.cpp
+ SemaDecl.cpp
+ SemaDeclCXX.cpp
+ SemaDeclObjC.cpp
+ SemaExpr.cpp
+ SemaExprCXX.cpp
+ SemaExprObjC.cpp
+ SemaInherit.cpp
+ SemaInit.cpp
+ SemaLookup.cpp
+ SemaNamedCast.cpp
+ SemaOverload.cpp
+ SemaStmt.cpp
+ SemaTemplate.cpp
+ SemaTemplateInstantiate.cpp
+ SemaTemplateInstantiateDecl.cpp
+ SemaTemplateInstantiateExpr.cpp
+ SemaTemplateInstantiateStmt.cpp
+ SemaType.cpp
+ )
+
+add_dependencies(clangSema ClangDiagnosticSema)
diff --git a/lib/Sema/CXXFieldCollector.h b/lib/Sema/CXXFieldCollector.h
new file mode 100644
index 0000000..69d1351
--- /dev/null
+++ b/lib/Sema/CXXFieldCollector.h
@@ -0,0 +1,76 @@
+//===- CXXFieldCollector.h - Utility class for C++ class semantic analysis ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides CXXFieldCollector that is used during parsing & semantic
+// analysis of C++ classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_CXXFIELDCOLLECTOR_H
+#define LLVM_CLANG_SEMA_CXXFIELDCOLLECTOR_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class FieldDecl;
+
+/// CXXFieldCollector - Used to keep track of CXXFieldDecls during parsing of
+/// C++ classes.
+class CXXFieldCollector {
+ /// Fields - Contains all FieldDecls collected during parsing of a C++
+ /// class. When a nested class is entered, its fields are appended to the
+ /// fields of its parent class, when it is exited its fields are removed.
+ llvm::SmallVector<FieldDecl*, 32> Fields;
+
+ /// FieldCount - Each entry represents the number of fields collected during
+ /// the parsing of a C++ class. When a nested class is entered, a new field
+ /// count is pushed, when it is exited, the field count is popped.
+ llvm::SmallVector<size_t, 4> FieldCount;
+
+ // Example:
+ //
+ // class C {
+ // int x,y;
+ // class NC {
+ // int q;
+ // // At this point, Fields contains [x,y,q] decls and FieldCount contains
+ // // [2,1].
+ // };
+ // int z;
+ // // At this point, Fields contains [x,y,z] decls and FieldCount contains
+ // // [3].
+ // };
+
+public:
+ /// StartClass - Called by Sema::ActOnStartCXXClassDef.
+ void StartClass() { FieldCount.push_back(0); }
+
+ /// Add - Called by Sema::ActOnCXXMemberDeclarator.
+ void Add(FieldDecl *D) {
+ Fields.push_back(D);
+ ++FieldCount.back();
+ }
+
+ /// getCurNumField - The number of fields added to the currently parsed class.
+ size_t getCurNumFields() const { return FieldCount.back(); }
+
+ /// getCurFields - Pointer to array of fields added to the currently parsed
+ /// class.
+ FieldDecl **getCurFields() { return &*(Fields.end() - getCurNumFields()); }
+
+ /// FinishClass - Called by Sema::ActOnFinishCXXClassDef.
+ void FinishClass() {
+ Fields.resize(Fields.size() - getCurNumFields());
+ FieldCount.pop_back();
+ }
+};
+
+} // end namespace clang
+
+#endif
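The push/pop discipline from the comment above (class C with nested NC) can be traced with a tiny FieldDecl-free copy of the collector. MiniFieldCollector below is hypothetical and exists only to make the [x,y,q]/[2,1] bookkeeping concrete.

#include <cassert>
#include <cstddef>
#include <vector>

// A FieldDecl-free copy of CXXFieldCollector, with field names as strings.
struct MiniFieldCollector {
  std::vector<const char *> Fields;
  std::vector<std::size_t> FieldCount;

  void StartClass() { FieldCount.push_back(0); }
  void Add(const char *F) { Fields.push_back(F); ++FieldCount.back(); }
  std::size_t getCurNumFields() const { return FieldCount.back(); }
  void FinishClass() {
    Fields.resize(Fields.size() - getCurNumFields());
    FieldCount.pop_back();
  }
};

int main() {
  MiniFieldCollector FC;
  FC.StartClass();            // class C {
  FC.Add("x"); FC.Add("y");
  FC.StartClass();            //   class NC {
  FC.Add("q");
  assert(FC.Fields.size() == 3 && FC.FieldCount.size() == 2); // [x,y,q], [2,1]
  FC.FinishClass();           //   };  NC's fields are dropped here
  FC.Add("z");
  assert(FC.Fields.size() == 3 && FC.getCurNumFields() == 3); // [x,y,z], [3]
  FC.FinishClass();           // };
}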
diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp
new file mode 100644
index 0000000..ceab859
--- /dev/null
+++ b/lib/Sema/IdentifierResolver.cpp
@@ -0,0 +1,293 @@
+//===- IdentifierResolver.cpp - Lexical Scope Name lookup -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierResolver class, which is used for lexical
+// scoped lookup, based on declaration names.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IdentifierResolver.h"
+#include "clang/Basic/LangOptions.h"
+#include <list>
+#include <vector>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap class
+//===----------------------------------------------------------------------===//
+
+/// IdDeclInfoMap - Associates IdDeclInfos with declaration names.
+/// Allocates 'pools' (vectors of IdDeclInfos) to avoid allocating each
+/// individual IdDeclInfo on the heap.
+class IdentifierResolver::IdDeclInfoMap {
+ static const unsigned int VECTOR_SIZE = 512;
+ // Holds vectors of IdDeclInfos that serve as 'pools'.
+ // New vectors are added when the current one is full.
+ std::list< std::vector<IdDeclInfo> > IDIVecs;
+ unsigned int CurIndex;
+
+public:
+ IdDeclInfoMap() : CurIndex(VECTOR_SIZE) {}
+
+ /// Returns the IdDeclInfo associated with the DeclarationName.
+ /// It creates a new IdDeclInfo if one was not created before for this id.
+ IdDeclInfo &operator[](DeclarationName Name);
+};
+
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfo Implementation
+//===----------------------------------------------------------------------===//
+
+/// AddShadowed - Add a decl by putting it directly above the 'Shadow' decl.
+/// Later lookups will find the 'Shadow' decl first. The 'Shadow' decl must
+/// be already added to the scope chain and must be in the same context as
+/// the decl that we want to add.
+void IdentifierResolver::IdDeclInfo::AddShadowed(NamedDecl *D,
+ NamedDecl *Shadow) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (Shadow == *(I-1)) {
+ Decls.insert(I-1, D);
+ return;
+ }
+ }
+
+ assert(0 && "Shadow wasn't in scope chain!");
+}
+
+/// RemoveDecl - Remove the decl from the scope chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::IdDeclInfo::RemoveDecl(NamedDecl *D) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (D == *(I-1)) {
+ Decls.erase(I-1);
+ return;
+ }
+ }
+
+ assert(0 && "Didn't find this decl on its identifier's chain!");
+}
+
+bool
+IdentifierResolver::IdDeclInfo::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (Old == *(I-1)) {
+ *(I - 1) = New;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// IdentifierResolver Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierResolver::IdentifierResolver(const LangOptions &langOpt)
+ : LangOpt(langOpt), IdDeclInfos(new IdDeclInfoMap) {
+}
+IdentifierResolver::~IdentifierResolver() {
+ delete IdDeclInfos;
+}
+
+/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
+/// true if 'D' belongs to the given declaration context.
+bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx,
+ ASTContext &Context, Scope *S) const {
+ Ctx = Ctx->getLookupContext();
+
+ if (Ctx->isFunctionOrMethod()) {
+ // Ignore the scopes associated within transparent declaration contexts.
+ while (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext())
+ S = S->getParent();
+
+ if (S->isDeclScope(Action::DeclPtrTy::make(D)))
+ return true;
+ if (LangOpt.CPlusPlus) {
+ // C++ 3.3.2p3:
+ // The name declared in a catch exception-declaration is local to the
+ // handler and shall not be redeclared in the outermost block of the
+ // handler.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement), and shall not be
+ // redeclared in a subsequent condition of that statement nor in the
+ // outermost block (or, for the if statement, any of the outermost blocks)
+ // of the controlled statement.
+ //
+ assert(S->getParent() && "No TUScope?");
+ if (S->getParent()->getFlags() & Scope::ControlScope)
+ return S->getParent()->isDeclScope(Action::DeclPtrTy::make(D));
+ }
+ return false;
+ }
+
+ return D->getDeclContext()->getLookupContext() == Ctx->getPrimaryContext();
+}
+
+/// AddDecl - Link the decl to its shadowed decl chain.
+void IdentifierResolver::AddDecl(NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ Name.setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[Name];
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ IDI->AddDecl(PrevD);
+ } else
+ IDI = toIdDeclInfo(Ptr);
+
+ IDI->AddDecl(D);
+}
+
+/// AddShadowedDecl - Link the decl to its shadowed decl chain putting it
+/// after the decl that the iterator points to, thus the 'Shadow' decl will be
+/// encountered before the 'D' decl.
+void IdentifierResolver::AddShadowedDecl(NamedDecl *D, NamedDecl *Shadow) {
+ assert(D->getDeclName() == Shadow->getDeclName() && "Different ids!");
+
+ DeclarationName Name = D->getDeclName();
+ void *Ptr = Name.getFETokenInfo<void>();
+ assert(Ptr && "No decl from Ptr ?");
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ Name.setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[Name];
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ assert(PrevD == Shadow && "Invalid shadow decl ?");
+ IDI->AddDecl(D);
+ IDI->AddDecl(PrevD);
+ return;
+ }
+
+ IDI = toIdDeclInfo(Ptr);
+ IDI->AddShadowed(D, Shadow);
+}
+
+/// RemoveDecl - Unlink the decl from its shadowed decl chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::RemoveDecl(NamedDecl *D) {
+ assert(D && "null param passed");
+ DeclarationName Name = D->getDeclName();
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ assert(Ptr && "Didn't find this decl on its identifier's chain!");
+
+ if (isDeclPtr(Ptr)) {
+ assert(D == Ptr && "Didn't find this decl on its identifier's chain!");
+ Name.setFETokenInfo(NULL);
+ return;
+ }
+
+ return toIdDeclInfo(Ptr)->RemoveDecl(D);
+}
+
+bool IdentifierResolver::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
+ assert(Old->getDeclName() == New->getDeclName() &&
+ "Cannot replace a decl with another decl of a different name");
+
+ DeclarationName Name = Old->getDeclName();
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr)
+ return false;
+
+ if (isDeclPtr(Ptr)) {
+ if (Ptr == Old) {
+ Name.setFETokenInfo(New);
+ return true;
+ }
+ return false;
+ }
+
+ return toIdDeclInfo(Ptr)->ReplaceDecl(Old, New);
+}
+
+/// begin - Returns an iterator for decls with name 'Name'.
+IdentifierResolver::iterator
+IdentifierResolver::begin(DeclarationName Name) {
+ void *Ptr = Name.getFETokenInfo<void>();
+ if (!Ptr) return end();
+
+ if (isDeclPtr(Ptr))
+ return iterator(static_cast<NamedDecl*>(Ptr));
+
+ IdDeclInfo *IDI = toIdDeclInfo(Ptr);
+
+ IdDeclInfo::DeclsTy::iterator I = IDI->decls_end();
+ if (I != IDI->decls_begin())
+ return iterator(I-1);
+ // No decls found.
+ return end();
+}
+
+void IdentifierResolver::AddDeclToIdentifierChain(IdentifierInfo *II,
+ NamedDecl *D) {
+ void *Ptr = II->getFETokenInfo<void>();
+
+ if (!Ptr) {
+ II->setFETokenInfo(D);
+ return;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ II->setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[II];
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ IDI->AddDecl(PrevD);
+ } else
+ IDI = toIdDeclInfo(Ptr);
+
+ IDI->AddDecl(D);
+}
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap Implementation
+//===----------------------------------------------------------------------===//
+
+/// Returns the IdDeclInfo associated with the DeclarationName.
+/// It creates a new IdDeclInfo if one was not created before for this id.
+IdentifierResolver::IdDeclInfo &
+IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) {
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (Ptr) return *toIdDeclInfo(Ptr);
+
+ if (CurIndex == VECTOR_SIZE) {
+ // Add an IdDeclInfo vector 'pool'.
+ IDIVecs.push_back(std::vector<IdDeclInfo>());
+ // Fill the vector
+ IDIVecs.back().resize(VECTOR_SIZE);
+ CurIndex = 0;
+ }
+ IdDeclInfo *IDI = &IDIVecs.back()[CurIndex];
+ Name.setFETokenInfo(reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(IDI) | 0x1)
+ );
+ ++CurIndex;
+ return *IDI;
+}
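The FETokenInfo pointer that operator[] tags with 0x1 is the same encoding isDeclPtr/toIdDeclInfo undo elsewhere in this file: a name's opaque slot holds either a bare NamedDecl* (low bit clear) or a tagged IdDeclInfo* (low bit set), so the common one-decl-per-name case never allocates a chain. A self-contained sketch of that encoding with hypothetical Decl/DeclChain stand-ins follows; like the real code, it assumes the pointers involved are at least 2-byte aligned.

#include <cassert>
#include <cstdint>
#include <vector>

struct Decl { int id; };              // aligned, so its address has a clear low bit
typedef std::vector<Decl *> DeclChain;

static bool isDeclPtr(void *P) {
  return (reinterpret_cast<std::uintptr_t>(P) & 0x1) == 0;
}
static void *tagChain(DeclChain *C) {
  return reinterpret_cast<void *>(reinterpret_cast<std::uintptr_t>(C) | 0x1);
}
static DeclChain *untagChain(void *P) {
  assert(!isDeclPtr(P) && "not a chain");
  return reinterpret_cast<DeclChain *>(
      reinterpret_cast<std::uintptr_t>(P) & ~static_cast<std::uintptr_t>(0x1));
}

int main() {
  Decl D1 = {1}, D2 = {2};

  // One decl for the name: store it directly, no chain allocated.
  void *Info = &D1;
  assert(isDeclPtr(Info));

  // A second decl shadows the first: promote to a heap-allocated chain and
  // tag the pointer so later lookups know which form they are holding.
  DeclChain *Chain = new DeclChain;
  Chain->push_back(static_cast<Decl *>(Info));
  Chain->push_back(&D2);
  Info = tagChain(Chain);

  assert(!isDeclPtr(Info) && untagChain(Info)->size() == 2);
  delete untagChain(Info);
}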
diff --git a/lib/Sema/IdentifierResolver.h b/lib/Sema/IdentifierResolver.h
new file mode 100644
index 0000000..0b0e6b3
--- /dev/null
+++ b/lib/Sema/IdentifierResolver.h
@@ -0,0 +1,214 @@
+//===- IdentifierResolver.h - Lexical Scope Name lookup ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IdentifierResolver class, which is used for lexical
+// scoped lookup, based on declaration names.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_SEMA_IDENTIFIERRESOLVER_H
+#define LLVM_CLANG_AST_SEMA_IDENTIFIERRESOLVER_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Parse/Scope.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+
+/// IdentifierResolver - Keeps track of shadowed decls on enclosing
+/// scopes. It manages the shadowing chains of declaration names and
+/// implements efficient decl lookup based on a declaration name.
+class IdentifierResolver {
+
+ /// IdDeclInfo - Keeps track of information about decls associated
+ /// to a particular declaration name. IdDeclInfos are lazily
+ /// constructed and assigned to a declaration name the first time a
+ /// decl with that declaration name is shadowed in some scope.
+ class IdDeclInfo {
+ public:
+ typedef llvm::SmallVector<NamedDecl*, 2> DeclsTy;
+
+ inline DeclsTy::iterator decls_begin() { return Decls.begin(); }
+ inline DeclsTy::iterator decls_end() { return Decls.end(); }
+
+ void AddDecl(NamedDecl *D) { Decls.push_back(D); }
+
+ /// AddShadowed - Add a decl by putting it directly above the 'Shadow' decl.
+ /// Later lookups will find the 'Shadow' decl first. The 'Shadow' decl must
+ /// be already added to the scope chain and must be in the same context as
+ /// the decl that we want to add.
+ void AddShadowed(NamedDecl *D, NamedDecl *Shadow);
+
+ /// RemoveDecl - Remove the decl from the scope chain.
+ /// The decl must already be part of the decl chain.
+ void RemoveDecl(NamedDecl *D);
+
+ /// Replaces the Old declaration with the New declaration. If the
+ /// replacement is successful, returns true. If the old
+ /// declaration was not found, returns false.
+ bool ReplaceDecl(NamedDecl *Old, NamedDecl *New);
+
+ private:
+ DeclsTy Decls;
+ };
+
+public:
+
+ /// iterator - Iterate over the decls of a specified declaration name.
+ /// Whether it walks the parent declaration contexts depends on how it was
+ /// instantiated.
+ class iterator {
+ public:
+ typedef NamedDecl * value_type;
+ typedef NamedDecl * reference;
+ typedef NamedDecl * pointer;
+ typedef std::input_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ /// Ptr - There are 3 forms that 'Ptr' represents:
+ /// 1) A single NamedDecl. (Ptr & 0x1 == 0)
+ /// 2) An IdDeclInfo::DeclsTy::iterator that traverses only the decls of the
+ /// same declaration context. (Ptr & 0x3 == 0x1)
+ /// 3) An IdDeclInfo::DeclsTy::iterator that traverses the decls of parent
+ /// declaration contexts too. (Ptr & 0x3 == 0x3)
+ uintptr_t Ptr;
+ typedef IdDeclInfo::DeclsTy::iterator BaseIter;
+
+ /// A single NamedDecl. (Ptr & 0x1 == 0)
+ iterator(NamedDecl *D) {
+ Ptr = reinterpret_cast<uintptr_t>(D);
+ assert((Ptr & 0x1) == 0 && "Invalid Ptr!");
+ }
+ /// An IdDeclInfo::DeclsTy::iterator that may or may not walk the parent
+ /// declaration contexts, depending on 'LookInParentCtx'.
+ iterator(BaseIter I) {
+ Ptr = reinterpret_cast<uintptr_t>(I) | 0x1;
+ }
+
+ bool isIterator() const { return (Ptr & 0x1); }
+
+ BaseIter getIterator() const {
+ assert(isIterator() && "Ptr not an iterator!");
+ return reinterpret_cast<BaseIter>(Ptr & ~0x3);
+ }
+
+ friend class IdentifierResolver;
+ public:
+ iterator() : Ptr(0) {}
+
+ NamedDecl *operator*() const {
+ if (isIterator())
+ return *getIterator();
+ else
+ return reinterpret_cast<NamedDecl*>(Ptr);
+ }
+
+ bool operator==(const iterator &RHS) const {
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const iterator &RHS) const {
+ return Ptr != RHS.Ptr;
+ }
+
+ // Preincrement.
+ iterator& operator++() {
+ if (!isIterator()) // common case.
+ Ptr = 0;
+ else {
+ NamedDecl *D = **this;
+ void *InfoPtr = D->getDeclName().getFETokenInfo<void>();
+ assert(!isDeclPtr(InfoPtr) && "Decl with wrong id ?");
+ IdDeclInfo *Info = toIdDeclInfo(InfoPtr);
+
+ BaseIter I = getIterator();
+ if (I != Info->decls_begin())
+ *this = iterator(I-1);
+ else // No more decls.
+ *this = iterator();
+ }
+ return *this;
+ }
+
+ uintptr_t getAsOpaqueValue() const { return Ptr; }
+
+ static iterator getFromOpaqueValue(uintptr_t P) {
+ iterator Result;
+ Result.Ptr = P;
+ return Result;
+ }
+ };
+
+ /// begin - Returns an iterator for decls with the name 'Name'.
+ static iterator begin(DeclarationName Name);
+
+ /// end - Returns an iterator that has 'finished'.
+ static iterator end() {
+ return iterator();
+ }
+
+ /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+ /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
+ /// true if 'D' belongs to the given declaration context.
+ bool isDeclInScope(Decl *D, DeclContext *Ctx, ASTContext &Context,
+ Scope *S = 0) const;
+
+ /// AddDecl - Link the decl to its shadowed decl chain.
+ void AddDecl(NamedDecl *D);
+
+ /// AddShadowedDecl - Link the decl to its shadowed decl chain putting it
+ /// after the decl that the iterator points to, thus the 'Shadow' decl will be
+ /// encountered before the 'D' decl.
+ void AddShadowedDecl(NamedDecl *D, NamedDecl *Shadow);
+
+ /// RemoveDecl - Unlink the decl from its shadowed decl chain.
+ /// The decl must already be part of the decl chain.
+ void RemoveDecl(NamedDecl *D);
+
+ /// Replace the decl Old with the new declaration New on its
+ /// identifier chain. Returns true if the old declaration was found
+ /// (and, therefore, replaced).
+ bool ReplaceDecl(NamedDecl *Old, NamedDecl *New);
+
+ /// \brief Link the declaration into the chain of declarations for
+ /// the given identifier.
+ ///
+ /// This is a lower-level routine used by the PCH reader to link a
+ /// declaration into a specific IdentifierInfo before the
+ /// declaration actually has a name.
+ void AddDeclToIdentifierChain(IdentifierInfo *II, NamedDecl *D);
+
+ explicit IdentifierResolver(const LangOptions &LangOpt);
+ ~IdentifierResolver();
+
+private:
+ const LangOptions &LangOpt;
+
+ class IdDeclInfoMap;
+ IdDeclInfoMap *IdDeclInfos;
+
+ /// FETokenInfo contains a Decl pointer if lower bit == 0.
+ static inline bool isDeclPtr(void *Ptr) {
+ return (reinterpret_cast<uintptr_t>(Ptr) & 0x1) == 0;
+ }
+
+ /// FETokenInfo contains a IdDeclInfo pointer if lower bit == 1.
+ static inline IdDeclInfo *toIdDeclInfo(void *Ptr) {
+ assert((reinterpret_cast<uintptr_t>(Ptr) & 0x1) == 1
+ && "Ptr not a IdDeclInfo* !");
+ return reinterpret_cast<IdDeclInfo*>(
+ reinterpret_cast<uintptr_t>(Ptr) & ~0x1
+ );
+ }
+};
+
+} // end namespace clang
+
+#endif
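The lookup order this iterator implements falls out of how AddDecl and begin() cooperate: decls are pushed onto the back of a name's chain as scopes are entered, begin() starts at decls_end()-1, and operator++ walks toward decls_begin(), so the innermost (most recently pushed) declaration is always seen first. A small model of that back-to-front walk, with strings standing in for NamedDecls:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Chain;  // stands in for IdDeclInfo::Decls
  Chain.push_back("::x");          // file-scope 'x'
  Chain.push_back("f()::x");       // local 'x' shadowing it

  // Walk the chain from the back, as begin()/operator++ do above.
  std::vector<std::string> LookupOrder;
  for (std::vector<std::string>::reverse_iterator I = Chain.rbegin(),
                                                  E = Chain.rend();
       I != E; ++I)
    LookupOrder.push_back(*I);

  assert(LookupOrder[0] == "f()::x"); // the shadowing decl is found first
  assert(LookupOrder[1] == "::x");
}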
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
new file mode 100644
index 0000000..ae863f2
--- /dev/null
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -0,0 +1,327 @@
+//===--- JumpDiagnostics.cpp - Analyze Jump Targets for VLA issues --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the JumpScopeChecker class, which is used to diagnose
+// jumps that enter a VLA scope in an invalid way.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+using namespace clang;
+
+namespace {
+
+/// JumpScopeChecker - This object is used by Sema to diagnose invalid jumps
+/// into VLA and other protected scopes. For example, this rejects:
+/// goto L;
+/// int a[n];
+/// L:
+///
+class JumpScopeChecker {
+ Sema &S;
+
+ /// GotoScope - This is a record that we use to keep track of all of the
+ /// scopes that are introduced by VLAs and other things that scope jumps like
+ /// gotos. This scope tree has nothing to do with the source scope tree,
+ /// because you can have multiple VLA scopes per compound statement, and most
+ /// compound statements don't introduce any scopes.
+ struct GotoScope {
+ /// ParentScope - The index in ScopeMap of the parent scope. This is 0 when
+ /// the parent scope is the function body.
+ unsigned ParentScope;
+
+ /// Diag - The diagnostic to emit if there is a jump into this scope.
+ unsigned Diag;
+
+ /// Loc - Location to emit the diagnostic.
+ SourceLocation Loc;
+
+ GotoScope(unsigned parentScope, unsigned diag, SourceLocation L)
+ : ParentScope(parentScope), Diag(diag), Loc(L) {}
+ };
+
+ llvm::SmallVector<GotoScope, 48> Scopes;
+ llvm::DenseMap<Stmt*, unsigned> LabelAndGotoScopes;
+ llvm::SmallVector<Stmt*, 16> Jumps;
+public:
+ JumpScopeChecker(Stmt *Body, Sema &S);
+private:
+ void BuildScopeInformation(Stmt *S, unsigned ParentScope);
+ void VerifyJumps();
+ void CheckJump(Stmt *From, Stmt *To,
+ SourceLocation DiagLoc, unsigned JumpDiag);
+};
+} // end anonymous namespace
+
+
+JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s) : S(s) {
+ // Add a scope entry for function scope.
+ Scopes.push_back(GotoScope(~0U, ~0U, SourceLocation()));
+
+ // Build information for the top level compound statement, so that we have a
+ // defined scope record for every "goto" and label.
+ BuildScopeInformation(Body, 0);
+
+ // Check that all jumps we saw are kosher.
+ VerifyJumps();
+}
+
+/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
+/// diagnostic that should be emitted if control goes over it. If not, return 0.
+static unsigned GetDiagForGotoScopeDecl(const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getType()->isVariablyModifiedType())
+ return diag::note_protected_by_vla;
+ if (VD->hasAttr<CleanupAttr>())
+ return diag::note_protected_by_cleanup;
+ } else if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ if (TD->getUnderlyingType()->isVariablyModifiedType())
+ return diag::note_protected_by_vla_typedef;
+ }
+
+ return 0;
+}
+
+
+/// BuildScopeInformation - The subtree rooted at S is known to form a
+/// coherent VLA scope with a specified parent node. Walk through the
+/// statements, adding any labels or gotos to LabelAndGotoScopes and recursively
+/// walking the AST as needed.
+void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
+
+ // If we found a label, remember that it is in ParentScope scope.
+ if (isa<LabelStmt>(S) || isa<DefaultStmt>(S) || isa<CaseStmt>(S)) {
+ LabelAndGotoScopes[S] = ParentScope;
+ } else if (isa<GotoStmt>(S) || isa<SwitchStmt>(S) ||
+ isa<IndirectGotoStmt>(S) || isa<AddrLabelExpr>(S)) {
+ // Remember both what scope a goto is in as well as the fact that we have
+ // it. This makes the second scan not have to walk the AST again.
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ }
+
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E;
+ ++CI) {
+ Stmt *SubStmt = *CI;
+ if (SubStmt == 0) continue;
+
+ // FIXME: diagnose jumps past initialization: required in C++, warning in C.
+ // goto L; int X = 4; L: ;
+
+ // If this is a declstmt with a VLA definition, it defines a scope from here
+ // to the end of the containing context.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
+ // The decl statement creates a scope if any of the decls in it are VLAs or
+ // have the cleanup attribute.
+ for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I) {
+ // If this decl causes a new scope, push and switch to it.
+ if (unsigned Diag = GetDiagForGotoScopeDecl(*I)) {
+ Scopes.push_back(GotoScope(ParentScope, Diag, (*I)->getLocation()));
+ ParentScope = Scopes.size()-1;
+ }
+
+ // If the decl has an initializer, walk it with the potentially new
+ // scope we just installed.
+ if (VarDecl *VD = dyn_cast<VarDecl>(*I))
+ if (Expr *Init = VD->getInit())
+ BuildScopeInformation(Init, ParentScope);
+ }
+ continue;
+ }
+
+ // Disallow jumps into any part of an @try statement by pushing a scope and
+ // walking all sub-stmts in that scope.
+ if (ObjCAtTryStmt *AT = dyn_cast<ObjCAtTryStmt>(SubStmt)) {
+ // Recursively walk the AST for the @try part.
+ Scopes.push_back(GotoScope(ParentScope,diag::note_protected_by_objc_try,
+ AT->getAtTryLoc()));
+ if (Stmt *TryPart = AT->getTryBody())
+ BuildScopeInformation(TryPart, Scopes.size()-1);
+
+ // Jump from the catch to the finally or try is not valid.
+ for (ObjCAtCatchStmt *AC = AT->getCatchStmts(); AC;
+ AC = AC->getNextCatchStmt()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_catch,
+ AC->getAtCatchLoc()));
+ // @catches are nested and it isn't
+ BuildScopeInformation(AC->getCatchBody(), Scopes.size()-1);
+ }
+
+ // Jump from the finally to the try or catch is not valid.
+ if (ObjCAtFinallyStmt *AF = AT->getFinallyStmt()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_finally,
+ AF->getAtFinallyLoc()));
+ BuildScopeInformation(AF, Scopes.size()-1);
+ }
+
+ continue;
+ }
+
+ // Disallow jumps into the protected statement of an @synchronized, but
+ // allow jumps into the object expression it protects.
+ if (ObjCAtSynchronizedStmt *AS = dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)){
+ // Recursively walk the AST for the @synchronized object expr, it is
+ // evaluated in the normal scope.
+ BuildScopeInformation(AS->getSynchExpr(), ParentScope);
+
+ // Recursively walk the AST for the @synchronized part, protected by a new
+ // scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_synchronized,
+ AS->getAtSynchronizedLoc()));
+ BuildScopeInformation(AS->getSynchBody(), Scopes.size()-1);
+ continue;
+ }
+
+ // Disallow jumps into any part of a C++ try statement. This is pretty
+ // much the same as for Obj-C.
+ if (CXXTryStmt *TS = dyn_cast<CXXTryStmt>(SubStmt)) {
+ Scopes.push_back(GotoScope(ParentScope, diag::note_protected_by_cxx_try,
+ TS->getSourceRange().getBegin()));
+ if (Stmt *TryBlock = TS->getTryBlock())
+ BuildScopeInformation(TryBlock, Scopes.size()-1);
+
+ // Jump from the catch into the try is not allowed either.
+ for(unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *CS = TS->getHandler(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_catch,
+ CS->getSourceRange().getBegin()));
+ BuildScopeInformation(CS->getHandlerBlock(), Scopes.size()-1);
+ }
+
+ continue;
+ }
+
+ // Recursively walk the AST.
+ BuildScopeInformation(SubStmt, ParentScope);
+ }
+}
+
+/// VerifyJumps - Verify each element of the Jumps array to see if they are
+/// valid, emitting diagnostics if not.
+void JumpScopeChecker::VerifyJumps() {
+ while (!Jumps.empty()) {
+ Stmt *Jump = Jumps.pop_back_val();
+
+ // With a goto, check the jump from the goto statement to its target label.
+ if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
+ CheckJump(GS, GS->getLabel(), GS->getGotoLoc(),
+ diag::err_goto_into_protected_scope);
+ continue;
+ }
+
+ if (SwitchStmt *SS = dyn_cast<SwitchStmt>(Jump)) {
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ assert(LabelAndGotoScopes.count(SC) && "Case not visited?");
+ CheckJump(SS, SC, SC->getLocStart(),
+ diag::err_switch_into_protected_scope);
+ }
+ continue;
+ }
+
+ unsigned DiagnosticScope;
+
+ // We don't know where an indirect goto goes, require that it be at the
+ // top level of scoping.
+ if (IndirectGotoStmt *IG = dyn_cast<IndirectGotoStmt>(Jump)) {
+ assert(LabelAndGotoScopes.count(Jump) &&
+ "Jump didn't get added to scopes?");
+ unsigned GotoScope = LabelAndGotoScopes[IG];
+ if (GotoScope == 0) continue; // indirect jump is ok.
+ S.Diag(IG->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
+ DiagnosticScope = GotoScope;
+ } else {
+ // We model &&Label as a jump for purposes of scope tracking. We actually
+ // don't care *where* the address of label is, but we require the *label
+ // itself* to be in scope 0. If it is nested inside of a VLA scope, then
+ // it is possible for an indirect goto to illegally enter the VLA scope by
+ // indirectly jumping to the label.
+ assert(isa<AddrLabelExpr>(Jump) && "Unknown jump type");
+ LabelStmt *TheLabel = cast<AddrLabelExpr>(Jump)->getLabel();
+
+ assert(LabelAndGotoScopes.count(TheLabel) &&
+ "Referenced label didn't get added to scopes?");
+ unsigned LabelScope = LabelAndGotoScopes[TheLabel];
+ if (LabelScope == 0) continue; // Addr of label is ok.
+
+ S.Diag(Jump->getLocStart(), diag::err_addr_of_label_in_protected_scope);
+ DiagnosticScope = LabelScope;
+ }
+
+ // Report all the things that would be skipped over by this &&label or
+ // indirect goto.
+ while (DiagnosticScope != 0) {
+ S.Diag(Scopes[DiagnosticScope].Loc, Scopes[DiagnosticScope].Diag);
+ DiagnosticScope = Scopes[DiagnosticScope].ParentScope;
+ }
+ }
+}
+
+/// CheckJump - Validate that the specified jump statement is valid: that it is
+/// jumping within or out of its current scope, not into a deeper one.
+void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To,
+ SourceLocation DiagLoc, unsigned JumpDiag) {
+ assert(LabelAndGotoScopes.count(From) && "Jump didn't get added to scopes?");
+ unsigned FromScope = LabelAndGotoScopes[From];
+
+ assert(LabelAndGotoScopes.count(To) && "Jump didn't get added to scopes?");
+ unsigned ToScope = LabelAndGotoScopes[To];
+
+ // Common case: exactly the same scope, which is fine.
+ if (FromScope == ToScope) return;
+
+ // The only valid mismatch jump case happens when the jump is more deeply
+ // nested inside the jump target. Do a quick scan to see if the jump is valid
+ // because valid code is more common than invalid code.
+ unsigned TestScope = Scopes[FromScope].ParentScope;
+ while (TestScope != ~0U) {
+ // If we found the jump target, then we're jumping out of our current scope,
+ // which is perfectly fine.
+ if (TestScope == ToScope) return;
+
+ // Otherwise, scan up the hierarchy.
+ TestScope = Scopes[TestScope].ParentScope;
+ }
+
+ // If we get here, then we know we have invalid code. Diagnose the bad jump,
+ // and then emit a note at each VLA being jumped out of.
+ S.Diag(DiagLoc, JumpDiag);
+
+ // Eliminate the common prefix of the jump and the target. Start by
+ // linearizing both scopes, reversing them as we go.
+ std::vector<unsigned> FromScopes, ToScopes;
+ for (TestScope = FromScope; TestScope != ~0U;
+ TestScope = Scopes[TestScope].ParentScope)
+ FromScopes.push_back(TestScope);
+ for (TestScope = ToScope; TestScope != ~0U;
+ TestScope = Scopes[TestScope].ParentScope)
+ ToScopes.push_back(TestScope);
+
+ // Remove any common entries (such as the top-level function scope).
+ while (!FromScopes.empty() && FromScopes.back() == ToScopes.back()) {
+ FromScopes.pop_back();
+ ToScopes.pop_back();
+ }
+
+ // Emit diagnostics for whatever is left in ToScopes.
+ for (unsigned i = 0, e = ToScopes.size(); i != e; ++i)
+ S.Diag(Scopes[ToScopes[i]].Loc, Scopes[ToScopes[i]].Diag);
+}
+
+void Sema::DiagnoseInvalidJumps(Stmt *Body) {
+ JumpScopeChecker(Body, *this);
+}
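For the classic invalid case from the header comment (goto L; int a[n]; L:;), CheckJump finishes by linearizing both scope chains up to the function scope, trimming their common outer scopes, and emitting one note per scope left on the target side. The sketch below replays that chain comparison with hypothetical integer scope indices and a plain parent array standing in for the Scopes vector.

#include <cassert>
#include <vector>

// Walk from a scope index up to the root (~0U), collecting the chain.
static std::vector<unsigned> chainToRoot(const std::vector<unsigned> &Parent,
                                         unsigned Scope) {
  std::vector<unsigned> Chain;
  for (; Scope != ~0U; Scope = Parent[Scope])
    Chain.push_back(Scope);
  return Chain;
}

int main() {
  // Scope tree: 0 is the function body; 1 is a VLA scope inside it; 2 is a
  // second VLA scope nested inside 1. Parent[i] is the parent of scope i.
  std::vector<unsigned> Parent;
  Parent.push_back(~0U); // 0 -> root
  Parent.push_back(0);   // 1 -> 0
  Parent.push_back(1);   // 2 -> 1

  unsigned FromScope = 0; // the goto sits in the function body
  unsigned ToScope = 2;   // the label sits inside both VLA scopes

  std::vector<unsigned> FromChain = chainToRoot(Parent, FromScope); // {0}
  std::vector<unsigned> ToChain = chainToRoot(Parent, ToScope);     // {2,1,0}

  // Drop the common outer scopes, exactly as CheckJump does.
  while (!FromChain.empty() && FromChain.back() == ToChain.back()) {
    FromChain.pop_back();
    ToChain.pop_back();
  }

  // What remains on the target side are the protected scopes the jump would
  // enter: one note would be emitted for scope 2 and one for scope 1.
  assert(ToChain.size() == 2 && ToChain[0] == 2 && ToChain[1] == 1);
}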
diff --git a/lib/Sema/Makefile b/lib/Sema/Makefile
new file mode 100644
index 0000000..0f4c796
--- /dev/null
+++ b/lib/Sema/Makefile
@@ -0,0 +1,23 @@
+##===- clang/lib/Sema/Makefile -----------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the semantic analyzer and AST builder library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangSema
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/Sema/ParseAST.cpp b/lib/Sema/ParseAST.cpp
new file mode 100644
index 0000000..e2ee88a
--- /dev/null
+++ b/lib/Sema/ParseAST.cpp
@@ -0,0 +1,85 @@
+//===--- ParseAST.cpp - Provide the clang::ParseAST method ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::ParseAST method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/ParseAST.h"
+#include "Sema.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Parse/Parser.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Public interface to the file
+//===----------------------------------------------------------------------===//
+
+/// ParseAST - Parse the entire file specified, notifying the ASTConsumer as
+/// the file is parsed. This inserts the parsed decls into the translation unit
+/// held by Ctx.
+///
+void clang::ParseAST(Preprocessor &PP, ASTConsumer *Consumer,
+ ASTContext &Ctx, bool PrintStats,
+ bool CompleteTranslationUnit) {
+ // Collect global stats on Decls/Stmts (until we have a module streamer).
+ if (PrintStats) {
+ Decl::CollectingStats(true);
+ Stmt::CollectingStats(true);
+ }
+
+ Sema S(PP, Ctx, *Consumer, CompleteTranslationUnit);
+ Parser P(PP, S);
+ PP.EnterMainSourceFile();
+
+ // Initialize the parser.
+ P.Initialize();
+
+ Consumer->Initialize(Ctx);
+
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumer))
+ SC->InitializeSema(S);
+
+ if (ExternalASTSource *External = Ctx.getExternalSource()) {
+ if (ExternalSemaSource *ExternalSema =
+ dyn_cast<ExternalSemaSource>(External))
+ ExternalSema->InitializeSema(S);
+
+ External->StartTranslationUnit(Consumer);
+ }
+
+ Parser::DeclGroupPtrTy ADecl;
+
+ while (!P.ParseTopLevelDecl(ADecl)) { // Not end of file.
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl)
+ Consumer->HandleTopLevelDecl(ADecl.getAsVal<DeclGroupRef>());
+ }
+
+ Consumer->HandleTranslationUnit(Ctx);
+
+ if (PrintStats) {
+ fprintf(stderr, "\nSTATISTICS:\n");
+ P.getActions().PrintStats();
+ Ctx.PrintStats();
+ Decl::PrintStats();
+ Stmt::PrintStats();
+ Consumer->PrintStats();
+
+ Decl::CollectingStats(false);
+ Stmt::CollectingStats(false);
+ }
+}
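The heart of ParseAST is the pull/push loop in the middle: the parser is asked for one top-level declaration at a time, null results (such as a stray top-level semicolon) are skipped, everything else is handed to the ASTConsumer, and HandleTranslationUnit fires once at the end. Here is a toy model of that loop with hypothetical MiniParser/MiniConsumer types; no real Preprocessor or Sema is involved.

#include <cstddef>
#include <cstdio>
#include <vector>

struct MiniConsumer {
  virtual void HandleTopLevelDecl(int D) { std::printf("decl %d\n", D); }
  virtual void HandleTranslationUnit() { std::printf("done\n"); }
  virtual ~MiniConsumer() {}
};

struct MiniParser {
  std::vector<int> Decls;
  std::size_t Pos = 0;
  // Returns true at end of file; otherwise fills D, where 0 means "nothing
  // useful was produced", mirroring the null DeclGroupPtrTy case above.
  bool ParseTopLevelDecl(int &D) {
    if (Pos == Decls.size()) return true;
    D = Decls[Pos++];
    return false;
  }
};

int main() {
  MiniParser P;
  P.Decls = {1, 0, 2}; // the 0 models a top-level semicolon
  MiniConsumer C;

  int ADecl;
  while (!P.ParseTopLevelDecl(ADecl)) // not end of file
    if (ADecl)
      C.HandleTopLevelDecl(ADecl);
  C.HandleTranslationUnit();
}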
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
new file mode 100644
index 0000000..1212d07
--- /dev/null
+++ b/lib/Sema/Sema.cpp
@@ -0,0 +1,333 @@
+//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the actions class which performs semantic analysis and
+// builds an AST out of a parse stream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+using namespace clang;
+
+/// ConvertArgToStringFn - This function is used to pretty print the
+/// specified diagnostic argument (such as a QualType) as a string.
+static void ConvertArgToStringFn(Diagnostic::ArgumentKind Kind, intptr_t Val,
+ const char *Modifier, unsigned ModLen,
+ const char *Argument, unsigned ArgLen,
+ llvm::SmallVectorImpl<char> &Output,
+ void *Cookie) {
+ ASTContext &Context = *static_cast<ASTContext*>(Cookie);
+
+ std::string S;
+ if (Kind == Diagnostic::ak_qualtype) {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for QualType argument");
+
+ QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
+
+ // FIXME: Playing with std::string is really slow.
+ S = Ty.getAsString(Context.PrintingPolicy);
+
+ // If this is a sugared type (like a typedef, typeof, etc), then unwrap one
+ // level of the sugar so that the type is more obvious to the user.
+ QualType DesugaredTy = Ty->getDesugaredType(true);
+ DesugaredTy.setCVRQualifiers(DesugaredTy.getCVRQualifiers() |
+ Ty.getCVRQualifiers());
+
+ if (Ty != DesugaredTy &&
+ // If the desugared type is a vector type, we don't want to expand it,
+ // it will turn into an attribute mess. People want their "vec4".
+ !isa<VectorType>(DesugaredTy) &&
+
+ // Don't desugar magic Objective-C types.
+ Ty.getUnqualifiedType() != Context.getObjCIdType() &&
+ Ty.getUnqualifiedType() != Context.getObjCSelType() &&
+ Ty.getUnqualifiedType() != Context.getObjCProtoType() &&
+ Ty.getUnqualifiedType() != Context.getObjCClassType() &&
+
+ // Not va_list.
+ Ty.getUnqualifiedType() != Context.getBuiltinVaListType()) {
+ S = "'"+S+"' (aka '";
+ S += DesugaredTy.getAsString();
+ S += "')";
+ Output.append(S.begin(), S.end());
+ return;
+ }
+
+ } else if (Kind == Diagnostic::ak_declarationname) {
+
+ DeclarationName N = DeclarationName::getFromOpaqueInteger(Val);
+ S = N.getAsString();
+
+ if (ModLen == 9 && !memcmp(Modifier, "objcclass", 9) && ArgLen == 0)
+ S = '+' + S;
+ else if (ModLen == 12 && !memcmp(Modifier, "objcinstance", 12) && ArgLen==0)
+ S = '-' + S;
+ else
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for DeclarationName argument");
+ } else {
+ assert(Kind == Diagnostic::ak_nameddecl);
+ if (ModLen == 1 && Modifier[0] == 'q' && ArgLen == 0)
+ S = reinterpret_cast<NamedDecl*>(Val)->getQualifiedNameAsString();
+ else {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for NamedDecl* argument");
+ S = reinterpret_cast<NamedDecl*>(Val)->getNameAsString();
+ }
+ }
+
+ Output.push_back('\'');
+ Output.append(S.begin(), S.end());
+ Output.push_back('\'');
+}
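This function is hooked up later in this file via PP.getDiagnostics().SetArgToStringFn(ConvertArgToStringFn, &Context): the diagnostics engine only sees an opaque callback plus a void* cookie, and Sema smuggles its ASTContext through that cookie. A minimal sketch of the callback-plus-cookie pattern with hypothetical MiniDiagEngine/MiniContext types follows; the real Diagnostic interface is considerably richer than this.

#include <cstdio>
#include <string>

// The engine stores an opaque function pointer and cookie; the callback casts
// the cookie back to whatever it knows it really is.
typedef void (*ArgToStringFnTy)(int Kind, long Val, std::string &Out,
                                void *Cookie);

struct MiniDiagEngine {
  ArgToStringFnTy Fn;
  void *Cookie;
  MiniDiagEngine() : Fn(0), Cookie(0) {}
  void SetArgToStringFn(ArgToStringFnTy F, void *C) { Fn = F; Cookie = C; }
  void emit(int Kind, long Val) {
    std::string S;
    Fn(Kind, Val, S, Cookie); // the engine never inspects Cookie itself
    std::printf("diagnostic argument: %s\n", S.c_str());
  }
};

struct MiniContext { const char *TypeName; };

static void FormatArg(int Kind, long Val, std::string &Out, void *Cookie) {
  // Like ConvertArgToStringFn, recover the context from the cookie and quote
  // the rendered argument.
  MiniContext &Ctx = *static_cast<MiniContext *>(Cookie);
  (void)Kind; (void)Val;
  Out = std::string("'") + Ctx.TypeName + "'";
}

int main() {
  MiniContext Ctx;
  Ctx.TypeName = "my_type";
  MiniDiagEngine Diags;
  Diags.SetArgToStringFn(FormatArg, &Ctx); // mirrors the Sema constructor
  Diags.emit(0, 0);
}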
+
+
+static inline RecordDecl *CreateStructDecl(ASTContext &C, const char *Name) {
+ if (C.getLangOptions().CPlusPlus)
+ return CXXRecordDecl::Create(C, TagDecl::TK_struct,
+ C.getTranslationUnitDecl(),
+ SourceLocation(), &C.Idents.get(Name));
+
+ return RecordDecl::Create(C, TagDecl::TK_struct,
+ C.getTranslationUnitDecl(),
+ SourceLocation(), &C.Idents.get(Name));
+}
+
+void Sema::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) {
+ TUScope = S;
+ PushDeclContext(S, Context.getTranslationUnitDecl());
+
+ if (PP.getTargetInfo().getPointerWidth(0) >= 64) {
+ // Install [u]int128_t for 64-bit targets.
+ PushOnScopeChains(TypedefDecl::Create(Context, CurContext,
+ SourceLocation(),
+ &Context.Idents.get("__int128_t"),
+ Context.Int128Ty), TUScope);
+ PushOnScopeChains(TypedefDecl::Create(Context, CurContext,
+ SourceLocation(),
+ &Context.Idents.get("__uint128_t"),
+ Context.UnsignedInt128Ty), TUScope);
+ }
+
+
+ if (!PP.getLangOptions().ObjC1) return;
+
+ if (Context.getObjCSelType().isNull()) {
+ // Synthesize "typedef struct objc_selector *SEL;"
+ RecordDecl *SelTag = CreateStructDecl(Context, "objc_selector");
+ PushOnScopeChains(SelTag, TUScope);
+
+ QualType SelT = Context.getPointerType(Context.getTagDeclType(SelTag));
+ TypedefDecl *SelTypedef = TypedefDecl::Create(Context, CurContext,
+ SourceLocation(),
+ &Context.Idents.get("SEL"),
+ SelT);
+ PushOnScopeChains(SelTypedef, TUScope);
+ Context.setObjCSelType(Context.getTypeDeclType(SelTypedef));
+ }
+
+ if (Context.getObjCClassType().isNull()) {
+ RecordDecl *ClassTag = CreateStructDecl(Context, "objc_class");
+ QualType ClassT = Context.getPointerType(Context.getTagDeclType(ClassTag));
+ TypedefDecl *ClassTypedef =
+ TypedefDecl::Create(Context, CurContext, SourceLocation(),
+ &Context.Idents.get("Class"), ClassT);
+ PushOnScopeChains(ClassTag, TUScope);
+ PushOnScopeChains(ClassTypedef, TUScope);
+ Context.setObjCClassType(Context.getTypeDeclType(ClassTypedef));
+ }
+
+ // Synthesize "@class Protocol;
+ if (Context.getObjCProtoType().isNull()) {
+ ObjCInterfaceDecl *ProtocolDecl =
+ ObjCInterfaceDecl::Create(Context, CurContext, SourceLocation(),
+ &Context.Idents.get("Protocol"),
+ SourceLocation(), true);
+ Context.setObjCProtoType(Context.getObjCInterfaceType(ProtocolDecl));
+ PushOnScopeChains(ProtocolDecl, TUScope);
+ }
+
+ // Synthesize "typedef struct objc_object { Class isa; } *id;"
+ if (Context.getObjCIdType().isNull()) {
+ RecordDecl *ObjectTag = CreateStructDecl(Context, "objc_object");
+
+ QualType ObjT = Context.getPointerType(Context.getTagDeclType(ObjectTag));
+ PushOnScopeChains(ObjectTag, TUScope);
+ TypedefDecl *IdTypedef = TypedefDecl::Create(Context, CurContext,
+ SourceLocation(),
+ &Context.Idents.get("id"),
+ ObjT);
+ PushOnScopeChains(IdTypedef, TUScope);
+ Context.setObjCIdType(Context.getTypeDeclType(IdTypedef));
+ }
+}
+
+Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
+ bool CompleteTranslationUnit)
+ : LangOpts(pp.getLangOptions()), PP(pp), Context(ctxt), Consumer(consumer),
+ Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
+ ExternalSource(0), CurContext(0), PreDeclaratorDC(0),
+ CurBlock(0), PackContext(0), IdResolver(pp.getLangOptions()),
+ GlobalNewDeleteDeclared(false),
+ CompleteTranslationUnit(CompleteTranslationUnit),
+ CurrentInstantiationScope(0) {
+
+ StdNamespace = 0;
+ TUScope = 0;
+ if (getLangOptions().CPlusPlus)
+ FieldCollector.reset(new CXXFieldCollector());
+
+ // Tell diagnostics how to render things from the AST library.
+ PP.getDiagnostics().SetArgToStringFn(ConvertArgToStringFn, &Context);
+}
+
+/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
+/// If there is already an implicit cast, merge into the existing one.
+/// If isLvalue, the result of the cast is an lvalue.
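+ /// For example (illustrative sketch), a caller converting an argument to
+ /// 'int' might write:
+ ///   ImpCastExprToType(Arg, Context.IntTy, /*isLvalue=*/false);
+ /// which either retargets an existing ImplicitCastExpr or wraps Arg in a
+ /// new one.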
+void Sema::ImpCastExprToType(Expr *&Expr, QualType Ty, bool isLvalue) {
+ QualType ExprTy = Context.getCanonicalType(Expr->getType());
+ QualType TypeTy = Context.getCanonicalType(Ty);
+
+ if (ExprTy == TypeTy)
+ return;
+
+ if (Expr->getType().getTypePtr()->isPointerType() &&
+ Ty.getTypePtr()->isPointerType()) {
+ QualType ExprBaseType =
+ cast<PointerType>(ExprTy.getUnqualifiedType())->getPointeeType();
+ QualType BaseType =
+ cast<PointerType>(TypeTy.getUnqualifiedType())->getPointeeType();
+ if (ExprBaseType.getAddressSpace() != BaseType.getAddressSpace()) {
+ Diag(Expr->getExprLoc(), diag::err_implicit_pointer_address_space_cast)
+ << Expr->getSourceRange();
+ }
+ }
+
+ if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(Expr)) {
+ ImpCast->setType(Ty);
+ ImpCast->setLvalueCast(isLvalue);
+ } else
+ Expr = new (Context) ImplicitCastExpr(Ty, Expr, isLvalue);
+}
+
+void Sema::DeleteExpr(ExprTy *E) {
+ if (E) static_cast<Expr*>(E)->Destroy(Context);
+}
+void Sema::DeleteStmt(StmtTy *S) {
+ if (S) static_cast<Stmt*>(S)->Destroy(Context);
+}
+
+/// ActOnEndOfTranslationUnit - This is called at the very end of the
+/// translation unit when EOF is reached and all but the top-level scope is
+/// popped.
+void Sema::ActOnEndOfTranslationUnit() {
+ if (!CompleteTranslationUnit)
+ return;
+
+ // C99 6.9.2p2:
+ // A declaration of an identifier for an object that has file
+ // scope without an initializer, and without a storage-class
+ // specifier or with the storage-class specifier static,
+ // constitutes a tentative definition. If a translation unit
+ // contains one or more tentative definitions for an identifier,
+ // and the translation unit contains no external definition for
+ // that identifier, then the behavior is exactly as if the
+ // translation unit contains a file scope declaration of that
+ // identifier, with the composite type as of the end of the
+ // translation unit, with an initializer equal to 0.
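+ //
+ // For example (illustrative):
+ //   int i;   // tentative definition
+ //   int i;   // still tentative; no external definition in this unit
+ // behaves as if the translation unit ended with "int i = 0;".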
+ for (llvm::DenseMap<DeclarationName, VarDecl *>::iterator
+ D = TentativeDefinitions.begin(),
+ DEnd = TentativeDefinitions.end();
+ D != DEnd; ++D) {
+ VarDecl *VD = D->second;
+
+ if (VD->isInvalidDecl() || !VD->isTentativeDefinition(Context))
+ continue;
+
+ if (const IncompleteArrayType *ArrayT
+ = Context.getAsIncompleteArrayType(VD->getType())) {
+ if (RequireCompleteType(VD->getLocation(),
+ ArrayT->getElementType(),
+ diag::err_tentative_def_incomplete_type_arr))
+ VD->setInvalidDecl();
+ else {
+ // Set the length of the array to 1 (C99 6.9.2p5).
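+ // For example, a file-scope "int a[];" with no later definition is
+ // completed as "int a[1]".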
+ Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
+ llvm::APInt One(Context.getTypeSize(Context.getSizeType()),
+ true);
+ QualType T
+ = Context.getConstantArrayType(ArrayT->getElementType(),
+ One, ArrayType::Normal, 0);
+ VD->setType(T);
+ }
+ } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
+ diag::err_tentative_def_incomplete_type))
+ VD->setInvalidDecl();
+
+ // Notify the consumer that we've completed a tentative definition.
+ if (!VD->isInvalidDecl())
+ Consumer.CompleteTentativeDefinition(VD);
+
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Helper functions.
+//===----------------------------------------------------------------------===//
+
+/// getCurFunctionDecl - If inside of a function body, this returns a pointer
+/// to the function decl for the function being parsed. If we're currently
+/// in a 'block', this returns the containing context.
+FunctionDecl *Sema::getCurFunctionDecl() {
+ DeclContext *DC = CurContext;
+ while (isa<BlockDecl>(DC))
+ DC = DC->getParent();
+ return dyn_cast<FunctionDecl>(DC);
+}
+
+ObjCMethodDecl *Sema::getCurMethodDecl() {
+ DeclContext *DC = CurContext;
+ while (isa<BlockDecl>(DC))
+ DC = DC->getParent();
+ return dyn_cast<ObjCMethodDecl>(DC);
+}
+
+NamedDecl *Sema::getCurFunctionOrMethodDecl() {
+ DeclContext *DC = CurContext;
+ while (isa<BlockDecl>(DC))
+ DC = DC->getParent();
+ if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
+ return cast<NamedDecl>(DC);
+ return 0;
+}
+
+Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
+ this->Emit();
+
+ // If this is not a note, and we're in a template instantiation
+ // that is different from the last template instantiation where
+ // we emitted an error, print a template instantiation
+ // backtrace.
+ if (!SemaRef.Diags.isBuiltinNote(DiagID) &&
+ !SemaRef.ActiveTemplateInstantiations.empty() &&
+ SemaRef.ActiveTemplateInstantiations.back()
+ != SemaRef.LastTemplateInstantiationErrorContext) {
+ SemaRef.PrintInstantiationStack();
+ SemaRef.LastTemplateInstantiationErrorContext
+ = SemaRef.ActiveTemplateInstantiations.back();
+ }
+}
diff --git a/lib/Sema/Sema.h b/lib/Sema/Sema.h
new file mode 100644
index 0000000..c428d29
--- /dev/null
+++ b/lib/Sema/Sema.h
@@ -0,0 +1,2814 @@
+//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Sema class, which performs semantic analysis and
+// builds ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_SEMA_H
+#define LLVM_CLANG_AST_SEMA_H
+
+#include "IdentifierResolver.h"
+#include "CXXFieldCollector.h"
+#include "SemaOverload.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Parse/Action.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+ class APSInt;
+}
+
+namespace clang {
+ class ASTContext;
+ class ASTConsumer;
+ class Preprocessor;
+ class Decl;
+ class DeclContext;
+ class DeclSpec;
+ class ExternalSemaSource;
+ class NamedDecl;
+ class Stmt;
+ class Expr;
+ class InitListExpr;
+ class DesignatedInitExpr;
+ class CallExpr;
+ class DeclRefExpr;
+ class VarDecl;
+ class ParmVarDecl;
+ class TypedefDecl;
+ class FunctionDecl;
+ class QualType;
+ class LangOptions;
+ class Token;
+ class IntegerLiteral;
+ class StringLiteral;
+ class ArrayType;
+ class LabelStmt;
+ class SwitchStmt;
+ class CXXTryStmt;
+ class ExtVectorType;
+ class TypedefDecl;
+ class TemplateDecl;
+ class TemplateArgument;
+ class TemplateArgumentList;
+ class TemplateParameterList;
+ class TemplateTemplateParmDecl;
+ class ClassTemplatePartialSpecializationDecl;
+ class ClassTemplateDecl;
+ class ObjCInterfaceDecl;
+ class ObjCCompatibleAliasDecl;
+ class ObjCProtocolDecl;
+ class ObjCImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCCategoryDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCPropertyDecl;
+ class ObjCContainerDecl;
+ class BasePaths;
+ struct MemberLookupCriteria;
+ class CXXTemporary;
+
+/// BlockSemaInfo - When a block is being parsed, this contains information
+/// about the block. It is pointed to from Sema::CurBlock.
+struct BlockSemaInfo {
+ llvm::SmallVector<ParmVarDecl*, 8> Params;
+ bool hasPrototype;
+ bool isVariadic;
+ bool hasBlockDeclRefExprs;
+
+ BlockDecl *TheDecl;
+
+ /// TheScope - This is the scope for the block itself, which contains
+ /// arguments etc.
+ Scope *TheScope;
+
+ /// ReturnType - This will get set to the block result type by looking at
+ /// the return types, if any, in the block body.
+ Type *ReturnType;
+
+ /// LabelMap - This is a mapping from label identifiers to the LabelStmt for
+ /// it (which acts like the label decl in some ways). Forward referenced
+ /// labels have a LabelStmt created for them with a null location & SubStmt.
+ llvm::DenseMap<IdentifierInfo*, LabelStmt*> LabelMap;
+
+ /// SwitchStack - This is the current set of active switch statements in the
+ /// block.
+ llvm::SmallVector<SwitchStmt*, 8> SwitchStack;
+
+ /// SavedFunctionNeedsScopeChecking - This is the value of
+ /// CurFunctionNeedsScopeChecking at the point when the block started.
+ bool SavedFunctionNeedsScopeChecking;
+
+ /// PrevBlockInfo - If this is nested inside another block, this points
+ /// to the outer block.
+ BlockSemaInfo *PrevBlockInfo;
+};
+
+/// Sema - This implements semantic analysis and AST building for C.
+class Sema : public Action {
+ Sema(const Sema&); // DO NOT IMPLEMENT
+ void operator=(const Sema&); // DO NOT IMPLEMENT
+public:
+ const LangOptions &LangOpts;
+ Preprocessor &PP;
+ ASTContext &Context;
+ ASTConsumer &Consumer;
+ Diagnostic &Diags;
+ SourceManager &SourceMgr;
+
+ /// \brief Source of additional semantic information.
+ ExternalSemaSource *ExternalSource;
+
+ /// CurContext - This is the current declaration context of parsing.
+ DeclContext *CurContext;
+
+ /// PreDeclaratorDC - Keeps the declaration context before switching to the
+ /// context of a declarator's nested-name-specifier.
+ DeclContext *PreDeclaratorDC;
+
+ /// CurBlock - If inside of a block definition, this contains a pointer to
+ /// the active block object that represents it.
+ BlockSemaInfo *CurBlock;
+
+ /// PackContext - Manages the stack for #pragma pack. An alignment
+ /// of 0 indicates default alignment.
+ void *PackContext; // Really a "PragmaPackStack*"
+
+ /// FunctionLabelMap - This is a mapping from label identifiers to the
+ /// LabelStmt for it (which acts like the label decl in some ways). Forward
+ /// referenced labels have a LabelStmt created for them with a null location &
+ /// SubStmt.
+ ///
+ /// Note that this should always be accessed through getLabelMap() in order
+ /// to handle blocks properly.
+ llvm::DenseMap<IdentifierInfo*, LabelStmt*> FunctionLabelMap;
+
+ /// FunctionSwitchStack - This is the current set of active switch statements
+ /// in the top level function. Clients should always use getSwitchStack() to
+ /// handle the case when they are in a block.
+ llvm::SmallVector<SwitchStmt*, 8> FunctionSwitchStack;
+
+ /// ExprTemporaries - This is the stack of temporaries that are created by
+ /// the current full expression.
+ llvm::SmallVector<CXXTemporary*, 8> ExprTemporaries;
+
+ /// CurFunctionNeedsScopeChecking - This is set to true when a function or
+ /// ObjC method body contains a VLA or an ObjC try block, which introduce
+ /// scopes that need to be checked for goto conditions. If a function does
+ /// not contain this, then it need not have the jump checker run on it.
+ bool CurFunctionNeedsScopeChecking;
+
+ /// ExtVectorDecls - This is a list of all the extended vector types. This allows
+ /// us to associate a raw vector type with one of the ext_vector type names.
+ /// This is only necessary for issuing pretty diagnostics.
+ llvm::SmallVector<TypedefDecl*, 24> ExtVectorDecls;
+
+ /// ObjCCategoryImpls - Maintain a list of category implementations so
+ /// we can check for duplicates and find local method declarations.
+ llvm::SmallVector<ObjCCategoryImplDecl*, 8> ObjCCategoryImpls;
+
+ /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
+ llvm::OwningPtr<CXXFieldCollector> FieldCollector;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
+
+ /// PureVirtualClassDiagSet - a set of class declarations for which we have
+ /// emitted a list of pure virtual functions. Used to prevent emitting the
+ /// same list more than once.
+ llvm::OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
+
+ /// \brief A mapping from external names to the most recent
+ /// locally-scoped external declaration with that name.
+ ///
+ /// This map contains external declarations introduced in local
+ /// scope, e.g.,
+ ///
+ /// \code
+ /// void f() {
+ /// void foo(int, int);
+ /// }
+ /// \endcode
+ ///
+ /// Here, the name "foo" will be associated with the declaration on
+ /// "foo" within f. This name is not visible outside of
+ /// "f". However, we still find it in two cases:
+ ///
+ /// - If we are declaring another external with the name "foo", we
+ /// can find "foo" as a previous declaration, so that the types
+ /// of this external declaration can be checked for
+ /// compatibility.
+ ///
+ /// - If we would implicitly declare "foo" (e.g., due to a call to
+ /// "foo" in C when no prototype or definition is visible), then
+ /// we find this declaration of "foo" and complain that it is
+ /// not visible.
+ llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternalDecls;
+
+ /// \brief The set of tentative declarations seen so far in this
+ /// translation unit for which no definition has been seen.
+ ///
+ /// The tentative declarations are indexed by the name of the
+ /// declaration, and only the most recent tentative declaration for
+ /// a given variable will be recorded here.
+ llvm::DenseMap<DeclarationName, VarDecl *> TentativeDefinitions;
+
+ IdentifierResolver IdResolver;
+
+ /// Translation Unit Scope - useful to Objective-C actions that need
+ /// to look up file scope declarations in the "ordinary" C decl namespace.
+ /// For example, user-defined classes, built-in "id" type, etc.
+ Scope *TUScope;
+
+ /// The C++ "std" namespace, where the standard library resides. Cached here
+ /// by GetStdNamespace
+ NamespaceDecl *StdNamespace;
+
+ /// A flag to remember whether the implicit forms of operator new and delete
+ /// have been declared.
+ bool GlobalNewDeleteDeclared;
+
+ /// \brief Whether the code handled by Sema should be considered a
+ /// complete translation unit or not.
+ ///
+ /// When true (which is generally the case), Sema will perform
+ /// end-of-translation-unit semantic tasks (such as creating
+ /// initializers for tentative definitions in C) once parsing has
+ /// completed. This flag will be false when building PCH files,
+ /// since a PCH file is by definition not a complete translation
+ /// unit.
+ bool CompleteTranslationUnit;
+
+ typedef llvm::DenseMap<Selector, ObjCMethodList> MethodPool;
+
+ /// Instance/Factory Method Pools - allow efficient lookup when typechecking
+ /// messages to "id". We need to maintain a list, since selectors can have
+ /// differing signatures across classes. In Cocoa, this happens to be
+ /// extremely uncommon (only 1% of selectors are "overloaded").
+ MethodPool InstanceMethodPool;
+ MethodPool FactoryMethodPool;
+
+ MethodPool::iterator ReadMethodPool(Selector Sel, bool isInstance);
+
+ /// Private Helper predicate to check for 'self'.
+ bool isSelfExpr(Expr *RExpr);
+public:
+ Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
+ bool CompleteTranslationUnit = true);
+ ~Sema() {
+ if (PackContext) FreePackedContext();
+ }
+
+ const LangOptions &getLangOptions() const { return LangOpts; }
+ Diagnostic &getDiagnostics() const { return Diags; }
+ SourceManager &getSourceManager() const { return SourceMgr; }
+
+ /// \brief Helper class that creates diagnostics with optional
+ /// template instantiation stacks.
+ ///
+ /// This class provides a wrapper around the basic DiagnosticBuilder
+ /// class that emits diagnostics. SemaDiagnosticBuilder is
+ /// responsible for emitting the diagnostic (as DiagnosticBuilder
+ /// does) and, if the diagnostic comes from inside a template
+ /// instantiation, printing the template instantiation stack as
+ /// well.
+ class SemaDiagnosticBuilder : public DiagnosticBuilder {
+ Sema &SemaRef;
+ unsigned DiagID;
+
+ public:
+ SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
+ : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
+
+ ~SemaDiagnosticBuilder();
+ };
+
+ /// \brief Emit a diagnostic.
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
+ DiagnosticBuilder DB = Diags.Report(FullSourceLoc(Loc, SourceMgr), DiagID);
+ return SemaDiagnosticBuilder(DB, *this, DiagID);
+ }
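+
+ // Illustrative use: arguments and source ranges are streamed into the
+ // returned builder, e.g. (as in Sema.cpp above):
+ //   Diag(E->getExprLoc(), diag::err_implicit_pointer_address_space_cast)
+ //     << E->getSourceRange();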
+
+ virtual void DeleteExpr(ExprTy *E);
+ virtual void DeleteStmt(StmtTy *S);
+
+ OwningExprResult Owned(Expr* E) { return OwningExprResult(*this, E); }
+ OwningExprResult Owned(ExprResult R) {
+ if (R.isInvalid())
+ return ExprError();
+ return OwningExprResult(*this, R.get());
+ }
+ OwningStmtResult Owned(Stmt* S) { return OwningStmtResult(*this, S); }
+
+ virtual void ActOnEndOfTranslationUnit();
+
+ /// getLabelMap() - Return the current label map. If we're in a block, we
+ /// return the block's label map.
+ llvm::DenseMap<IdentifierInfo*, LabelStmt*> &getLabelMap() {
+ return CurBlock ? CurBlock->LabelMap : FunctionLabelMap;
+ }
+
+ /// getSwitchStack - This returns the switch stack for the current block or
+ /// function.
+ llvm::SmallVector<SwitchStmt*,8> &getSwitchStack() {
+ return CurBlock ? CurBlock->SwitchStack : FunctionSwitchStack;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Type Analysis / Processing: SemaType.cpp.
+ //
+ QualType adjustParameterType(QualType T);
+ QualType ConvertDeclSpecToType(const DeclSpec &DS, SourceLocation DeclLoc,
+ bool &IsInvalid);
+ void ProcessTypeAttributeList(QualType &Result, const AttributeList *AL);
+ QualType BuildPointerType(QualType T, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildReferenceType(QualType T, bool LValueRef, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+ Expr *ArraySize, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildFunctionType(QualType T,
+ QualType *ParamTypes, unsigned NumParamTypes,
+ bool Variadic, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType GetTypeForDeclarator(Declarator &D, Scope *S, unsigned Skip = 0,
+ TagDecl **OwnedDecl = 0);
+ DeclarationName GetNameForDeclarator(Declarator &D);
+ bool CheckSpecifiedExceptionType(QualType T, const SourceRange &Range);
+ bool CheckDistantExceptionSpec(QualType T);
+
+ QualType ObjCGetTypeForMethodDefinition(DeclPtrTy D);
+
+ bool UnwrapSimilarPointerTypes(QualType& T1, QualType& T2);
+
+ virtual TypeResult ActOnTypeName(Scope *S, Declarator &D);
+
+ bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned diag,
+ SourceRange Range1 = SourceRange(),
+ SourceRange Range2 = SourceRange(),
+ QualType PrintType = QualType());
+
+ QualType getQualifiedNameType(const CXXScopeSpec &SS, QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
+ //
+
+ /// getDeclName - Return a pretty name for the specified decl if possible, or
+ /// an empty string if not. This is used for pretty crash reporting.
+ virtual std::string getDeclName(DeclPtrTy D);
+
+ DeclGroupPtrTy ConvertDeclToDeclGroup(DeclPtrTy Ptr);
+
+ virtual TypeTy *getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, const CXXScopeSpec *SS);
+ virtual DeclSpec::TST isTagName(IdentifierInfo &II, Scope *S);
+
+ virtual DeclPtrTy ActOnDeclarator(Scope *S, Declarator &D) {
+ return ActOnDeclarator(S, D, false);
+ }
+ DeclPtrTy ActOnDeclarator(Scope *S, Declarator &D, bool IsFunctionDefinition);
+ void RegisterLocallyScopedExternCDecl(NamedDecl *ND, NamedDecl *PrevDecl,
+ Scope *S);
+ void DiagnoseFunctionSpecifiers(Declarator& D);
+ NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R, Decl* PrevDecl,
+ bool &Redeclaration);
+ NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R, NamedDecl* PrevDecl,
+ bool &Redeclaration);
+ void CheckVariableDeclaration(VarDecl *NewVD, NamedDecl *PrevDecl,
+ bool &Redeclaration);
+ NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R, NamedDecl* PrevDecl,
+ bool IsFunctionDefinition,
+ bool &Redeclaration);
+ void CheckFunctionDeclaration(FunctionDecl *NewFD, NamedDecl *&PrevDecl,
+ bool &Redeclaration,
+ bool &OverloadableAttrRequired);
+ virtual DeclPtrTy ActOnParamDeclarator(Scope *S, Declarator &D);
+ virtual void ActOnParamDefaultArgument(DeclPtrTy param,
+ SourceLocation EqualLoc,
+ ExprArg defarg);
+ virtual void ActOnParamUnparsedDefaultArgument(DeclPtrTy param,
+ SourceLocation EqualLoc);
+ virtual void ActOnParamDefaultArgumentError(DeclPtrTy param);
+ virtual void AddInitializerToDecl(DeclPtrTy dcl, FullExprArg init);
+ void AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit);
+ void ActOnUninitializedDecl(DeclPtrTy dcl);
+ virtual void SetDeclDeleted(DeclPtrTy dcl, SourceLocation DelLoc);
+ virtual DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
+ DeclPtrTy *Group,
+ unsigned NumDecls);
+ virtual void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
+ SourceLocation LocAfterDecls);
+ virtual DeclPtrTy ActOnStartOfFunctionDef(Scope *S, Declarator &D);
+ virtual DeclPtrTy ActOnStartOfFunctionDef(Scope *S, DeclPtrTy D);
+ virtual void ActOnStartOfObjCMethodDef(Scope *S, DeclPtrTy D);
+
+ virtual DeclPtrTy ActOnFinishFunctionBody(DeclPtrTy Decl, StmtArg Body);
+ DeclPtrTy ActOnFinishFunctionBody(DeclPtrTy Decl, StmtArg Body,
+ bool IsInstantiation);
+ void DiagnoseInvalidJumps(Stmt *Body);
+ virtual DeclPtrTy ActOnFileScopeAsmDecl(SourceLocation Loc, ExprArg expr);
+
+ /// Scope actions.
+ virtual void ActOnPopScope(SourceLocation Loc, Scope *S);
+ virtual void ActOnTranslationUnitScope(SourceLocation Loc, Scope *S);
+
+ /// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+ /// no declarator (e.g. "struct foo;") is parsed.
+ virtual DeclPtrTy ParsedFreeStandingDeclSpec(Scope *S, DeclSpec &DS);
+
+ bool InjectAnonymousStructOrUnionMembers(Scope *S, DeclContext *Owner,
+ RecordDecl *AnonRecord);
+ virtual DeclPtrTy BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
+ RecordDecl *Record);
+
+ bool isAcceptableTagRedeclaration(const TagDecl *Previous,
+ TagDecl::TagKind NewTag,
+ SourceLocation NewTagLoc,
+ const IdentifierInfo &Name);
+
+ virtual DeclPtrTy ActOnTag(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc, const CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ bool &OwnedDecl);
+
+ virtual void ActOnDefs(Scope *S, DeclPtrTy TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ llvm::SmallVectorImpl<DeclPtrTy> &Decls);
+ virtual DeclPtrTy ActOnField(Scope *S, DeclPtrTy TagD,
+ SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth);
+
+ FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth,
+ AccessSpecifier AS);
+
+ FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
+ RecordDecl *Record, SourceLocation Loc,
+ bool Mutable, Expr *BitfieldWidth,
+ AccessSpecifier AS, NamedDecl *PrevDecl,
+ Declarator *D = 0);
+
+ virtual DeclPtrTy ActOnIvar(Scope *S, SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth,
+ tok::ObjCKeywordKind visibility);
+
+ // This is used for both record definitions and ObjC interface declarations.
+ virtual void ActOnFields(Scope* S,
+ SourceLocation RecLoc, DeclPtrTy TagDecl,
+ DeclPtrTy *Fields, unsigned NumFields,
+ SourceLocation LBrac, SourceLocation RBrac,
+ AttributeList *AttrList);
+
+ /// ActOnTagStartDefinition - Invoked when we have entered the
+ /// scope of a tag's definition (e.g., for an enumeration, class,
+ /// struct, or union).
+ virtual void ActOnTagStartDefinition(Scope *S, DeclPtrTy TagDecl);
+
+ /// ActOnTagFinishDefinition - Invoked once we have finished parsing
+ /// the definition of a tag (enumeration, class, struct, or union).
+ virtual void ActOnTagFinishDefinition(Scope *S, DeclPtrTy TagDecl);
+
+ EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
+ EnumConstantDecl *LastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ ExprArg val);
+
+ virtual DeclPtrTy ActOnEnumConstant(Scope *S, DeclPtrTy EnumDecl,
+ DeclPtrTy LastEnumConstant,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ SourceLocation EqualLoc, ExprTy *Val);
+ virtual void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, DeclPtrTy EnumDecl,
+ DeclPtrTy *Elements, unsigned NumElements);
+
+ DeclContext *getContainingDC(DeclContext *DC);
+
+ /// Set the current declaration context until it gets popped.
+ void PushDeclContext(Scope *S, DeclContext *DC);
+ void PopDeclContext();
+
+ /// getCurFunctionDecl - If inside of a function body, this returns a pointer
+ /// to the function decl for the function being parsed. If we're currently
+ /// in a 'block', this returns the containing context.
+ FunctionDecl *getCurFunctionDecl();
+
+ /// getCurMethodDecl - If inside of a method body, this returns a pointer to
+ /// the method decl for the method being parsed. If we're currently
+ /// in a 'block', this returns the containing context.
+ ObjCMethodDecl *getCurMethodDecl();
+
+ /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
+ /// or C function we're in, otherwise return null. If we're currently
+ /// in a 'block', this returns the containing context.
+ NamedDecl *getCurFunctionOrMethodDecl();
+
+ /// Add this decl to the scope shadowed decl chains.
+ void PushOnScopeChains(NamedDecl *D, Scope *S);
+
+ /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+ /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
+ /// true if 'D' belongs to the given declaration context.
+ bool isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S = 0) {
+ return IdResolver.isDeclInScope(D, Ctx, Context, S);
+ }
+
+
+ /// Subroutines of ActOnDeclarator().
+ TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T);
+ void MergeTypeDefDecl(TypedefDecl *New, Decl *Old);
+ bool MergeFunctionDecl(FunctionDecl *New, Decl *Old);
+ bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
+ void MergeVarDecl(VarDecl *New, Decl *Old);
+ bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old);
+
+ /// C++ Overloading.
+ bool IsOverload(FunctionDecl *New, Decl* OldD,
+ OverloadedFunctionDecl::function_iterator &MatchedDecl);
+ ImplicitConversionSequence
+ TryImplicitConversion(Expr* From, QualType ToType,
+ bool SuppressUserConversions = false,
+ bool AllowExplicit = false,
+ bool ForceRValue = false);
+ bool IsStandardConversion(Expr *From, QualType ToType,
+ StandardConversionSequence& SCS);
+ bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
+ bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
+ bool IsComplexPromotion(QualType FromType, QualType ToType);
+ bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ QualType& ConvertedType, bool &IncompatibleObjC);
+ bool isObjCPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType, bool &IncompatibleObjC);
+ bool CheckPointerConversion(Expr *From, QualType ToType);
+ bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ QualType &ConvertedType);
+ bool CheckMemberPointerConversion(Expr *From, QualType ToType);
+ bool IsQualificationConversion(QualType FromType, QualType ToType);
+ bool IsUserDefinedConversion(Expr *From, QualType ToType,
+ UserDefinedConversionSequence& User,
+ bool AllowConversionFunctions,
+ bool AllowExplicit, bool ForceRValue);
+
+ ImplicitConversionSequence::CompareKind
+ CompareImplicitConversionSequences(const ImplicitConversionSequence& ICS1,
+ const ImplicitConversionSequence& ICS2);
+
+ ImplicitConversionSequence::CompareKind
+ CompareStandardConversionSequences(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+ ImplicitConversionSequence::CompareKind
+ CompareQualificationConversions(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+ ImplicitConversionSequence::CompareKind
+ CompareDerivedToBaseConversions(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+ ImplicitConversionSequence
+ TryCopyInitialization(Expr* From, QualType ToType,
+ bool SuppressUserConversions = false,
+ bool ForceRValue = false);
+ bool PerformCopyInitialization(Expr *&From, QualType ToType,
+ const char *Flavor, bool Elidable = false);
+
+ ImplicitConversionSequence
+ TryObjectArgumentInitialization(Expr *From, CXXMethodDecl *Method);
+ bool PerformObjectArgumentInitialization(Expr *&From, CXXMethodDecl *Method);
+
+ ImplicitConversionSequence TryContextuallyConvertToBool(Expr *From);
+ bool PerformContextuallyConvertToBool(Expr *&From);
+
+ /// OverloadingResult - Capture the result of performing overload
+ /// resolution.
+ enum OverloadingResult {
+ OR_Success, ///< Overload resolution succeeded.
+ OR_No_Viable_Function, ///< No viable function found.
+ OR_Ambiguous, ///< Ambiguous candidates found.
+ OR_Deleted ///< Overload resolution refers to a deleted function.
+ };
+
+ typedef llvm::SmallPtrSet<FunctionDecl *, 16> FunctionSet;
+ typedef llvm::SmallPtrSet<NamespaceDecl *, 16> AssociatedNamespaceSet;
+ typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
+
+ void AddOverloadCandidate(FunctionDecl *Function,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false,
+ bool ForceRValue = false);
+ void AddFunctionCandidates(const FunctionSet &Functions,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false);
+ void AddMethodCandidate(CXXMethodDecl *Method,
+ Expr *Object, Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false,
+ bool ForceRValue = false);
+ void AddConversionCandidate(CXXConversionDecl *Conversion,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet& CandidateSet);
+ void AddSurrogateCandidate(CXXConversionDecl *Conversion,
+ const FunctionProtoType *Proto,
+ Expr *Object, Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet);
+ void AddOperatorCandidates(OverloadedOperatorKind Op, Scope *S,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange = SourceRange());
+ void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange = SourceRange());
+ void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool IsAssignmentOperator = false,
+ unsigned NumContextualBoolArguments = 0);
+ void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet);
+ void AddArgumentDependentLookupCandidates(DeclarationName Name,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet);
+ bool isBetterOverloadCandidate(const OverloadCandidate& Cand1,
+ const OverloadCandidate& Cand2);
+ OverloadingResult BestViableFunction(OverloadCandidateSet& CandidateSet,
+ OverloadCandidateSet::iterator& Best);
+ void PrintOverloadCandidates(OverloadCandidateSet& CandidateSet,
+ bool OnlyViable);
+
+ FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *From, QualType ToType,
+ bool Complain);
+ void FixOverloadedFunctionReference(Expr *E, FunctionDecl *Fn);
+
+ FunctionDecl *ResolveOverloadedCallFn(Expr *Fn, NamedDecl *Callee,
+ DeclarationName UnqualifiedName,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc,
+ bool &ArgumentDependentLookup);
+
+ OwningExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
+ unsigned Opc,
+ FunctionSet &Functions,
+ ExprArg input);
+
+ OwningExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
+ unsigned Opc,
+ FunctionSet &Functions,
+ Expr *LHS, Expr *RHS);
+
+ ExprResult
+ BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
+ SourceLocation LParenLoc, Expr **Args,
+ unsigned NumArgs, SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+ ExprResult
+ BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
+ SourceLocation MemberLoc,
+ IdentifierInfo &Member);
+
+ /// Helpers for dealing with function parameters.
+ bool CheckParmsForFunctionDef(FunctionDecl *FD);
+ void CheckCXXDefaultArguments(FunctionDecl *FD);
+ void CheckExtraCXXDefaultArguments(Declarator &D);
+
+ Scope *getNonFieldDeclScope(Scope *S);
+
+ /// \name Name lookup
+ ///
+ /// These routines provide name lookup that is used during semantic
+ /// analysis to resolve the various kinds of names (identifiers,
+ /// overloaded operator names, constructor names, etc.) into zero or
+ /// more declarations within a particular scope. The major entry
+ /// points are LookupName, which performs unqualified name lookup,
+ /// and LookupQualifiedName, which performs qualified name lookup.
+ ///
+ /// All name lookup is performed based on some specific criteria,
+ /// which specify what names will be visible to name lookup and how
+ /// far name lookup should work. These criteria are important both
+ /// for capturing language semantics (certain lookups will ignore
+ /// certain names, for example) and for performance, since name
+ /// lookup is often a bottleneck in the compilation of C++. Name
+ /// lookup criteria are specified via the LookupNameKind enumeration.
+ ///
+ /// The results of name lookup can vary based on the kind of name
+ /// lookup performed, the current language, and the translation
+ /// unit. In C, for example, name lookup will either return nothing
+ /// (no entity found) or a single declaration. In C++, name lookup
+ /// can additionally refer to a set of overloaded functions or
+ /// result in an ambiguity. All of the possible results of name
+ /// lookup are captured by the LookupResult class, which provides
+ /// the ability to distinguish among them.
+ //@{
+
+ /// @brief Describes the kind of name lookup to perform.
+ enum LookupNameKind {
+ /// Ordinary name lookup, which finds ordinary names (functions,
+ /// variables, typedefs, etc.) in C and most kinds of names
+ /// (functions, variables, members, types, etc.) in C++.
+ LookupOrdinaryName = 0,
+ /// Tag name lookup, which finds the names of enums, classes,
+ /// structs, and unions.
+ LookupTagName,
+ /// Member name lookup, which finds the names of
+ /// class/struct/union members.
+ LookupMemberName,
+ /// Look up of an operator name (e.g., operator+) for use with
+ /// operator overloading. This lookup is similar to ordinary name
+ /// lookup, but will ignore any declarations that are class
+ /// members.
+ LookupOperatorName,
+ /// Look up of a name that precedes the '::' scope resolution
+ /// operator in C++. This lookup completely ignores operator,
+ /// function, and enumerator names (C++ [basic.lookup.qual]p1).
+ LookupNestedNameSpecifierName,
+ /// Look up a namespace name within a C++ using directive or
+ /// namespace alias definition, ignoring non-namespace names (C++
+ /// [basic.lookup.udir]p1).
+ LookupNamespaceName,
+ /// Look up an ordinary name that is going to be redeclared as a
+ /// name with linkage. This lookup ignores any declarations that
+ /// are outside of the current scope unless they have linkage. See
+ /// C99 6.2.2p4-5 and C++ [basic.link]p6.
+ LookupRedeclarationWithLinkage,
+ /// Look up the name of an Objective-C protocol.
+ LookupObjCProtocolName,
+ /// Look up the name of an Objective-C implementation
+ LookupObjCImplementationName,
+ /// Look up the name of an Objective-C category implementation
+ LookupObjCCategoryImplName
+ };
+
+ /// @brief Represents the results of name lookup.
+ ///
+ /// An instance of the LookupResult class captures the results of a
+ /// single name lookup, which can return no result (nothing found),
+ /// a single declaration, a set of overloaded functions, or an
+ /// ambiguity. Use the getKind() method to determine which of these
+ /// results occurred for a given lookup.
+ ///
+ /// Any non-ambiguous lookup can be converted into a single
+ /// (possibly NULL) @c NamedDecl* via a conversion function or the
+ /// getAsDecl() method. This conversion permits the common-case
+ /// usage in C and Objective-C where name lookup will always return
+ /// a single declaration.
+ struct LookupResult {
+ /// The kind of entity that is actually stored within the
+ /// LookupResult object.
+ enum {
+ /// First is a single declaration (a NamedDecl*), which may be NULL.
+ SingleDecl,
+
+ /// First is a single declaration (an OverloadedFunctionDecl*).
+ OverloadedDeclSingleDecl,
+
+ /// [First, Last) is an iterator range represented as opaque
+ /// pointers used to reconstruct IdentifierResolver::iterators.
+ OverloadedDeclFromIdResolver,
+
+ /// [First, Last) is an iterator range represented as opaque
+ /// pointers used to reconstruct DeclContext::lookup_iterators.
+ OverloadedDeclFromDeclContext,
+
+ /// First is a pointer to a BasePaths structure, which is owned
+ /// by the LookupResult. Last is non-zero to indicate that the
+ /// ambiguity is caused by two names found in base class
+ /// subobjects of different types.
+ AmbiguousLookupStoresBasePaths,
+
+ /// [First, Last) is an iterator range represented as opaque
+ /// pointers used to reconstruct a new'ed Decl*[] array containing the
+ /// found ambiguous decls. LookupResult is the owner of this array.
+ AmbiguousLookupStoresDecls
+ } StoredKind;
+
+ /// The first lookup result, whose contents depend on the kind of
+ /// lookup result. This may be a NamedDecl* (if StoredKind ==
+ /// SingleDecl), OverloadedFunctionDecl* (if StoredKind ==
+ /// OverloadedDeclSingleDecl), the opaque pointer from an
+ /// IdentifierResolver::iterator (if StoredKind ==
+ /// OverloadedDeclFromIdResolver), a DeclContext::lookup_iterator
+ /// (if StoredKind == OverloadedDeclFromDeclContext), or a
+ /// BasePaths pointer (if StoredKind == AmbiguousLookupStoresBasePaths).
+ mutable uintptr_t First;
+
+ /// The last lookup result, whose contents depend on the kind of
+ /// lookup result. This may be unused (if StoredKind ==
+ /// SingleDecl), it may have the same type as First (for
+ /// overloaded function declarations), or it may be used as a
+ /// Boolean value (if StoredKind == AmbiguousLookupStoresBasePaths).
+ mutable uintptr_t Last;
+
+ /// Context - The context in which we will build any
+ /// OverloadedFunctionDecl nodes needed by the conversion to
+ /// Decl*.
+ ASTContext *Context;
+
+ /// @brief The kind of entity found by name lookup.
+ enum LookupKind {
+ /// @brief No entity found met the criteria.
+ NotFound = 0,
+
+ /// @brief Name lookup found a single declaration that met the
+ /// criteria. getAsDecl will return this declaration.
+ Found,
+
+ /// @brief Name lookup found a set of overloaded functions that
+ /// met the criteria. getAsDecl will turn this set of overloaded
+ /// functions into an OverloadedFunctionDecl.
+ FoundOverloaded,
+
+ /// Name lookup results in an ambiguity because multiple
+ /// entities that meet the lookup criteria were found in
+ /// subobjects of different types. For example:
+ /// @code
+ /// struct A { void f(int); }
+ /// struct B { void f(double); }
+ /// struct C : A, B { };
+ /// void test(C c) {
+ /// c.f(0); // error: A::f and B::f come from subobjects of different
+ /// // types. overload resolution is not performed.
+ /// }
+ /// @endcode
+ AmbiguousBaseSubobjectTypes,
+
+ /// Name lookup results in an ambiguity because multiple
+ /// nonstatic entities that meet the lookup criteria were found
+ /// in different subobjects of the same type. For example:
+ /// @code
+ /// struct A { int x; };
+ /// struct B : A { };
+ /// struct C : A { };
+ /// struct D : B, C { };
+ /// int test(D d) {
+ /// return d.x; // error: 'x' is found in two A subobjects (of B and C)
+ /// }
+ /// @endcode
+ AmbiguousBaseSubobjects,
+
+ /// Name lookup results in an ambiguity because multiple definitions
+ /// of an entity that meet the lookup criteria were found in different
+ /// declaration contexts.
+ /// @code
+ /// namespace A {
+ /// int i;
+ /// namespace B { int i; }
+ /// int test() {
+ /// using namespace B;
+ /// return i; // error: 'i' is found in namespace A and A::B
+ /// }
+ /// }
+ /// @endcode
+ AmbiguousReference
+ };
+
+ static LookupResult CreateLookupResult(ASTContext &Context, NamedDecl *D);
+
+ static LookupResult CreateLookupResult(ASTContext &Context,
+ IdentifierResolver::iterator F,
+ IdentifierResolver::iterator L);
+
+ static LookupResult CreateLookupResult(ASTContext &Context,
+ DeclContext::lookup_iterator F,
+ DeclContext::lookup_iterator L);
+
+ static LookupResult CreateLookupResult(ASTContext &Context, BasePaths *Paths,
+ bool DifferentSubobjectTypes) {
+ LookupResult Result;
+ Result.StoredKind = AmbiguousLookupStoresBasePaths;
+ Result.First = reinterpret_cast<uintptr_t>(Paths);
+ Result.Last = DifferentSubobjectTypes? 1 : 0;
+ Result.Context = &Context;
+ return Result;
+ }
+
+ template <typename Iterator>
+ static LookupResult CreateLookupResult(ASTContext &Context,
+ Iterator B, std::size_t Len) {
+ NamedDecl ** Array = new NamedDecl*[Len];
+ for (std::size_t Idx = 0; Idx < Len; ++Idx, ++B)
+ Array[Idx] = *B;
+ LookupResult Result;
+ Result.StoredKind = AmbiguousLookupStoresDecls;
+ Result.First = reinterpret_cast<uintptr_t>(Array);
+ Result.Last = reinterpret_cast<uintptr_t>(Array + Len);
+ Result.Context = &Context;
+ return Result;
+ }
+
+ LookupKind getKind() const;
+
+ /// @brief Determine whether name lookup found something.
+ operator bool() const { return getKind() != NotFound; }
+
+ /// @brief Determines whether the lookup resulted in an ambiguity.
+ bool isAmbiguous() const {
+ return StoredKind == AmbiguousLookupStoresBasePaths ||
+ StoredKind == AmbiguousLookupStoresDecls;
+ }
+
+ /// @brief Allows conversion of a lookup result into a
+ /// declaration, with the same behavior as getAsDecl.
+ operator NamedDecl*() const { return getAsDecl(); }
+
+ NamedDecl* getAsDecl() const;
+
+ BasePaths *getBasePaths() const;
+
+ /// \brief Iterate over the results of name lookup.
+ ///
+ /// The @c iterator class provides iteration over the results of a
+ /// non-ambiguous name lookup.
+ class iterator {
+ /// The LookupResult structure we're iterating through.
+ LookupResult *Result;
+
+ /// The current position of this iterator within the sequence of
+ /// results. This value will have the same representation as the
+ /// @c First field in the LookupResult structure.
+ mutable uintptr_t Current;
+
+ public:
+ typedef NamedDecl * value_type;
+ typedef NamedDecl * reference;
+ typedef NamedDecl * pointer;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ iterator() : Result(0), Current(0) { }
+
+ iterator(LookupResult *Res, uintptr_t Cur) : Result(Res), Current(Cur) { }
+
+ reference operator*() const;
+
+ pointer operator->() const { return **this; }
+
+ iterator &operator++();
+
+ iterator operator++(int) {
+ iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend inline bool operator==(iterator const& x, iterator const& y) {
+ return x.Current == y.Current;
+ }
+
+ friend inline bool operator!=(iterator const& x, iterator const& y) {
+ return x.Current != y.Current;
+ }
+ };
+ friend class iterator;
+
+ iterator begin();
+ iterator end();
+
+ /// \brief Free the memory associated with this lookup.
+ void Destroy();
+ };
+
+private:
+ typedef llvm::SmallVector<LookupResult, 3> LookupResultsVecTy;
+
+ std::pair<bool, LookupResult> CppLookupName(Scope *S, DeclarationName Name,
+ LookupNameKind NameKind,
+ bool RedeclarationOnly);
+ ObjCMethodDecl *FindMethodInNestedImplementations(
+ const ObjCInterfaceDecl *IFace,
+ const Selector &Sel);
+
+public:
+ /// Determines whether D is a suitable lookup result according to the
+ /// lookup criteria.
+ static bool isAcceptableLookupResult(NamedDecl *D, LookupNameKind NameKind,
+ unsigned IDNS) {
+ switch (NameKind) {
+ case Sema::LookupOrdinaryName:
+ case Sema::LookupTagName:
+ case Sema::LookupMemberName:
+ case Sema::LookupRedeclarationWithLinkage: // FIXME: check linkage, scoping
+ case Sema::LookupObjCProtocolName:
+ case Sema::LookupObjCImplementationName:
+ case Sema::LookupObjCCategoryImplName:
+ return D->isInIdentifierNamespace(IDNS);
+
+ case Sema::LookupOperatorName:
+ return D->isInIdentifierNamespace(IDNS) &&
+ !D->getDeclContext()->isRecord();
+
+ case Sema::LookupNestedNameSpecifierName:
+ return isa<TypedefDecl>(D) || D->isInIdentifierNamespace(Decl::IDNS_Tag);
+
+ case Sema::LookupNamespaceName:
+ return isa<NamespaceDecl>(D) || isa<NamespaceAliasDecl>(D);
+ }
+
+ assert(false &&
+ "isAcceptableLookupResult always returns before this point");
+ return false;
+ }
+
+ LookupResult LookupName(Scope *S, DeclarationName Name,
+ LookupNameKind NameKind,
+ bool RedeclarationOnly = false,
+ bool AllowBuiltinCreation = true,
+ SourceLocation Loc = SourceLocation());
+ LookupResult LookupQualifiedName(DeclContext *LookupCtx, DeclarationName Name,
+ LookupNameKind NameKind,
+ bool RedeclarationOnly = false);
+ LookupResult LookupParsedName(Scope *S, const CXXScopeSpec *SS,
+ DeclarationName Name,
+ LookupNameKind NameKind,
+ bool RedeclarationOnly = false,
+ bool AllowBuiltinCreation = true,
+ SourceLocation Loc = SourceLocation());
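+
+ // Illustrative use (sketch, assuming II is the IdentifierInfo* being
+ // resolved): an unqualified ordinary lookup, relying on LookupResult's
+ // implicit conversion to NamedDecl*:
+ //   if (NamedDecl *D = LookupName(S, DeclarationName(II), LookupOrdinaryName))
+ //     /* use the declaration */;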
+
+ ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II);
+ ObjCImplementationDecl *LookupObjCImplementation(IdentifierInfo *II);
+ ObjCCategoryImplDecl *LookupObjCCategoryImpl(IdentifierInfo *II);
+
+ void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
+ QualType T1, QualType T2,
+ FunctionSet &Functions);
+
+ void ArgumentDependentLookup(DeclarationName Name,
+ Expr **Args, unsigned NumArgs,
+ FunctionSet &Functions);
+
+ void FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
+ AssociatedNamespaceSet &AssociatedNamespaces,
+ AssociatedClassSet &AssociatedClasses);
+
+ bool DiagnoseAmbiguousLookup(LookupResult &Result, DeclarationName Name,
+ SourceLocation NameLoc,
+ SourceRange LookupRange = SourceRange());
+ //@}
+
+ ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *Id);
+ NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
+ Scope *S, bool ForRedeclaration,
+ SourceLocation Loc);
+ NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
+ Scope *S);
+ void AddKnownFunctionAttributes(FunctionDecl *FD);
+
+ // More parsing and symbol table subroutines.
+
+ // Decl attributes - this routine is the top level dispatcher.
+ void ProcessDeclAttributes(Decl *D, const Declarator &PD);
+ void ProcessDeclAttributeList(Decl *D, const AttributeList *AttrList);
+
+ void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
+ bool &IncompleteImpl);
+ void WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethod,
+ ObjCMethodDecl *IntfMethod);
+ bool QualifiedIdConformsQualifiedId(QualType LHS, QualType RHS);
+
+ NamespaceDecl *GetStdNamespace();
+
+ bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl,
+ ObjCInterfaceDecl *IDecl);
+
+ /// CheckProtocolMethodDefs - This routine checks unimplemented
+ /// methods declared in a protocol, and those referenced by it.
+ /// \param IDecl - Used for checking for methods which may have been
+ /// inherited.
+ void CheckProtocolMethodDefs(SourceLocation ImpLoc,
+ ObjCProtocolDecl *PDecl,
+ bool& IncompleteImpl,
+ const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ ObjCInterfaceDecl *IDecl);
+
+ /// CheckImplementationIvars - This routine checks if the instance variables
+ /// listed in the implementation match those listed in the interface.
+ void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **Fields, unsigned nIvars,
+ SourceLocation Loc);
+
+ /// ImplMethodsVsClassMethods - This is the main routine to warn if any method
+ /// remains unimplemented in the class or category @implementation.
+ void ImplMethodsVsClassMethods(ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl,
+ bool IncompleteImpl = false);
+
+ /// MatchTwoMethodDeclarations - Checks if two methods' types match and
+ /// returns true or false accordingly.
+ bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
+ const ObjCMethodDecl *PrevMethod,
+ bool matchBasedOnSizeAndAlignment = false);
+
+ /// MatchAllMethodDeclarations - Check methods declared in an interface
+ /// or protocol against those declared in their implementations.
+ void MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ llvm::DenseSet<Selector> &InsMapSeen,
+ llvm::DenseSet<Selector> &ClsMapSeen,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl,
+ bool &IncompleteImpl,
+ bool ImmediateClass);
+
+ /// AddInstanceMethodToGlobalPool - All instance methods in a translation
+ /// unit are added to a global pool. This allows us to efficiently associate
+ /// a selector with a method declaration for purposes of typechecking
+ /// messages sent to "id" (where the class of the object is unknown).
+ void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method);
+
+ /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
+ /// there are multiple signatures.
+ ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R);
+
+ /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
+ /// there are multiple signatures.
+ ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R);
+
+ /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
+ void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method);
+ //===--------------------------------------------------------------------===//
+ // Statement Parsing Callbacks: SemaStmt.cpp.
+public:
+ virtual OwningStmtResult ActOnExprStmt(FullExprArg Expr);
+
+ virtual OwningStmtResult ActOnNullStmt(SourceLocation SemiLoc);
+ virtual OwningStmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
+ MultiStmtArg Elts,
+ bool isStmtExpr);
+ virtual OwningStmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ virtual OwningStmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprArg LHSVal,
+ SourceLocation DotDotDotLoc, ExprArg RHSVal,
+ SourceLocation ColonLoc);
+ virtual void ActOnCaseStmtBody(StmtTy *CaseStmt, StmtArg SubStmt);
+
+ virtual OwningStmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
+ SourceLocation ColonLoc,
+ StmtArg SubStmt, Scope *CurScope);
+ virtual OwningStmtResult ActOnLabelStmt(SourceLocation IdentLoc,
+ IdentifierInfo *II,
+ SourceLocation ColonLoc,
+ StmtArg SubStmt);
+ virtual OwningStmtResult ActOnIfStmt(SourceLocation IfLoc,
+ FullExprArg CondVal, StmtArg ThenVal,
+ SourceLocation ElseLoc, StmtArg ElseVal);
+ virtual OwningStmtResult ActOnStartOfSwitchStmt(ExprArg Cond);
+ virtual OwningStmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
+ StmtArg Switch, StmtArg Body);
+ virtual OwningStmtResult ActOnWhileStmt(SourceLocation WhileLoc,
+ FullExprArg Cond, StmtArg Body);
+ virtual OwningStmtResult ActOnDoStmt(SourceLocation DoLoc, StmtArg Body,
+ SourceLocation WhileLoc, ExprArg Cond);
+
+ virtual OwningStmtResult ActOnForStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ StmtArg First, ExprArg Second,
+ ExprArg Third, SourceLocation RParenLoc,
+ StmtArg Body);
+ virtual OwningStmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
+ SourceLocation LParenLoc,
+ StmtArg First, ExprArg Second,
+ SourceLocation RParenLoc, StmtArg Body);
+
+ virtual OwningStmtResult ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ IdentifierInfo *LabelII);
+ virtual OwningStmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
+ SourceLocation StarLoc,
+ ExprArg DestExp);
+ virtual OwningStmtResult ActOnContinueStmt(SourceLocation ContinueLoc,
+ Scope *CurScope);
+ virtual OwningStmtResult ActOnBreakStmt(SourceLocation GotoLoc,
+ Scope *CurScope);
+
+ virtual OwningStmtResult ActOnReturnStmt(SourceLocation ReturnLoc,
+ FullExprArg RetValExp);
+ OwningStmtResult ActOnBlockReturnStmt(SourceLocation ReturnLoc,
+ Expr *RetValExp);
+
+ virtual OwningStmtResult ActOnAsmStmt(SourceLocation AsmLoc,
+ bool IsSimple,
+ bool IsVolatile,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ std::string *Names,
+ MultiExprArg Constraints,
+ MultiExprArg Exprs,
+ ExprArg AsmString,
+ MultiExprArg Clobbers,
+ SourceLocation RParenLoc);
+
+ virtual OwningStmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen,
+ DeclPtrTy Parm, StmtArg Body,
+ StmtArg CatchList);
+
+ virtual OwningStmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc,
+ StmtArg Body);
+
+ virtual OwningStmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc,
+ StmtArg Try,
+ StmtArg Catch, StmtArg Finally);
+
+ virtual OwningStmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc,
+ ExprArg Throw,
+ Scope *CurScope);
+ virtual OwningStmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ ExprArg SynchExpr,
+ StmtArg SynchBody);
+
+ VarDecl *BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
+ IdentifierInfo *Name,
+ SourceLocation Loc,
+ SourceRange Range);
+ virtual DeclPtrTy ActOnExceptionDeclarator(Scope *S, Declarator &D);
+
+ virtual OwningStmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
+ DeclPtrTy ExDecl,
+ StmtArg HandlerBlock);
+ virtual OwningStmtResult ActOnCXXTryBlock(SourceLocation TryLoc,
+ StmtArg TryBlock,
+ MultiStmtArg Handlers);
+ void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Parsing Callbacks: SemaExpr.cpp.
+
+ bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc);
+ bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
+ ObjCMethodDecl *Getter,
+ SourceLocation Loc);
+ void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ Expr **Args, unsigned NumArgs);
+
+ // Primary Expressions.
+ virtual SourceRange getExprRange(ExprTy *E) const;
+
+ virtual OwningExprResult ActOnIdentifierExpr(Scope *S, SourceLocation Loc,
+ IdentifierInfo &II,
+ bool HasTrailingLParen,
+ const CXXScopeSpec *SS = 0,
+ bool isAddressOfOperand = false);
+ virtual OwningExprResult ActOnCXXOperatorFunctionIdExpr(Scope *S,
+ SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ bool HasTrailingLParen,
+ const CXXScopeSpec &SS,
+ bool isAddressOfOperand);
+ virtual OwningExprResult ActOnCXXConversionFunctionExpr(Scope *S,
+ SourceLocation OperatorLoc,
+ TypeTy *Ty,
+ bool HasTrailingLParen,
+ const CXXScopeSpec &SS,
+ bool isAddressOfOperand);
+ DeclRefExpr *BuildDeclRefExpr(NamedDecl *D, QualType Ty, SourceLocation Loc,
+ bool TypeDependent, bool ValueDependent,
+ const CXXScopeSpec *SS = 0);
+ VarDecl *BuildAnonymousStructUnionMemberPath(FieldDecl *Field,
+ llvm::SmallVectorImpl<FieldDecl *> &Path);
+ OwningExprResult
+ BuildAnonymousStructUnionMemberReference(SourceLocation Loc,
+ FieldDecl *Field,
+ Expr *BaseObjectExpr = 0,
+ SourceLocation OpLoc = SourceLocation());
+ OwningExprResult ActOnDeclarationNameExpr(Scope *S, SourceLocation Loc,
+ DeclarationName Name,
+ bool HasTrailingLParen,
+ const CXXScopeSpec *SS,
+ bool isAddressOfOperand = false);
+
+ virtual OwningExprResult ActOnPredefinedExpr(SourceLocation Loc,
+ tok::TokenKind Kind);
+ virtual OwningExprResult ActOnNumericConstant(const Token &);
+ virtual OwningExprResult ActOnCharacterConstant(const Token &);
+ virtual OwningExprResult ActOnParenExpr(SourceLocation L, SourceLocation R,
+ ExprArg Val);
+
+ /// ActOnStringLiteral - The specified tokens were lexed as pasted string
+ /// fragments (e.g. "foo" "bar" L"baz").
+ virtual OwningExprResult ActOnStringLiteral(const Token *Toks,
+ unsigned NumToks);
+
+ // Binary/Unary Operators. 'Tok' is the token for the operator.
+ OwningExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc,
+ unsigned OpcIn,
+ ExprArg InputArg);
+ virtual OwningExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, ExprArg Input);
+
+ OwningExprResult CreateSizeOfAlignOfExpr(QualType T, SourceLocation OpLoc,
+ bool isSizeOf, SourceRange R);
+ OwningExprResult CreateSizeOfAlignOfExpr(Expr *E, SourceLocation OpLoc,
+ bool isSizeOf, SourceRange R);
+ virtual OwningExprResult
+ ActOnSizeOfAlignOfExpr(SourceLocation OpLoc, bool isSizeof, bool isType,
+ void *TyOrEx, const SourceRange &ArgRange);
+
+ bool CheckAlignOfExpr(Expr *E, SourceLocation OpLoc, const SourceRange &R);
+ bool CheckSizeOfAlignOfOperand(QualType type, SourceLocation OpLoc,
+ const SourceRange &R, bool isSizeof);
+
+ virtual OwningExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ ExprArg Input);
+
+ virtual OwningExprResult ActOnArraySubscriptExpr(Scope *S, ExprArg Base,
+ SourceLocation LLoc,
+ ExprArg Idx,
+ SourceLocation RLoc);
+ virtual OwningExprResult ActOnMemberReferenceExpr(Scope *S, ExprArg Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation MemberLoc,
+ IdentifierInfo &Member,
+ DeclPtrTy ImplDecl);
+ bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc);
+
+ /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+ /// This provides the location of the left/right parens and a list of comma
+ /// locations.
+ virtual OwningExprResult ActOnCallExpr(Scope *S, ExprArg Fn,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+
+ virtual OwningExprResult ActOnCastExpr(SourceLocation LParenLoc, TypeTy *Ty,
+ SourceLocation RParenLoc, ExprArg Op);
+
+ virtual OwningExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
+ TypeTy *Ty,
+ SourceLocation RParenLoc,
+ ExprArg Op);
+
+ virtual OwningExprResult ActOnInitList(SourceLocation LParenLoc,
+ MultiExprArg InitList,
+ SourceLocation RParenLoc);
+
+ virtual OwningExprResult ActOnDesignatedInitializer(Designation &Desig,
+ SourceLocation Loc,
+ bool GNUSyntax,
+ OwningExprResult Init);
+
+ virtual OwningExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind,
+ ExprArg LHS, ExprArg RHS);
+ OwningExprResult CreateBuiltinBinOp(SourceLocation TokLoc,
+ unsigned Opc, Expr *lhs, Expr *rhs);
+
+ /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+ /// in the case of the GNU conditional expr extension.
+ virtual OwningExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ ExprArg Cond, ExprArg LHS,
+ ExprArg RHS);
+
+ /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
+ virtual OwningExprResult ActOnAddrLabel(SourceLocation OpLoc,
+ SourceLocation LabLoc,
+ IdentifierInfo *LabelII);
+
+ virtual OwningExprResult ActOnStmtExpr(SourceLocation LPLoc, StmtArg SubStmt,
+ SourceLocation RPLoc); // "({..})"
+
+ /// __builtin_offsetof(type, a.b[123][456].c)
+ virtual OwningExprResult ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ TypeTy *Arg1,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc);
+
+ // __builtin_types_compatible_p(type1, type2)
+ virtual OwningExprResult ActOnTypesCompatibleExpr(SourceLocation BuiltinLoc,
+ TypeTy *arg1, TypeTy *arg2,
+ SourceLocation RPLoc);
+
+ // __builtin_choose_expr(constExpr, expr1, expr2)
+ virtual OwningExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
+ ExprArg cond, ExprArg expr1,
+ ExprArg expr2, SourceLocation RPLoc);
+
+ // __builtin_va_arg(expr, type)
+ virtual OwningExprResult ActOnVAArg(SourceLocation BuiltinLoc,
+ ExprArg expr, TypeTy *type,
+ SourceLocation RPLoc);
+
+ // __null
+ virtual OwningExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
+
+ //===------------------------- "Block" Extension ------------------------===//
+
+ /// ActOnBlockStart - This callback is invoked when a block literal is
+ /// started.
+ virtual void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
+
+ /// ActOnBlockArguments - This callback allows processing of block arguments.
+ /// If there are no arguments, this is still invoked.
+ virtual void ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope);
+
+ /// ActOnBlockError - If there is an error parsing a block, this callback
+ /// is invoked to pop the information about the block from the action impl.
+ virtual void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
+
+ /// ActOnBlockStmtExpr - This is called when the body of a block statement
+ /// literal was successfully completed. ^(int x){...}
+ virtual OwningExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc,
+ StmtArg Body, Scope *CurScope);
+
+ //===---------------------------- C++ Features --------------------------===//
+
+ // Act on C++ namespaces
+ virtual DeclPtrTy ActOnStartNamespaceDef(Scope *S, SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ SourceLocation LBrace);
+ virtual void ActOnFinishNamespaceDef(DeclPtrTy Dcl, SourceLocation RBrace);
+
+ virtual DeclPtrTy ActOnUsingDirective(Scope *CurScope,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList);
+
+ void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
+
+ virtual DeclPtrTy ActOnNamespaceAliasDef(Scope *CurScope,
+ SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident);
+
+ /// AddCXXDirectInitializerToDecl - This action is called immediately after
+ /// ActOnDeclarator, when a C++ direct initializer is present.
+ /// e.g: "int x(1);"
+ virtual void AddCXXDirectInitializerToDecl(DeclPtrTy Dcl,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+
+ /// InitializeVarWithConstructor - Creates a CXXConstructExpr
+ /// and sets it as the initializer for the passed-in VarDecl.
+ void InitializeVarWithConstructor(VarDecl *VD,
+ CXXConstructorDecl *Constructor,
+ QualType DeclInitType,
+ Expr **Exprs, unsigned NumExprs);
+
+ /// MaybeBindToTemporary - If the passed in expression has a record type with
+ /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
+ /// it simply returns the passed in expression.
+ OwningExprResult MaybeBindToTemporary(Expr *E);
+
+ /// RemoveOutermostTemporaryBinding - Remove and destroy the outermost
+ /// CXXBindTemporaryExpr if necessary. This is used when a temporary should
+ /// not be destroyed when the full expression has been evaluated.
+ /// For example:
+ ///
+ /// const T& t = T(10, T());
+ ///
+ /// Here the outermost T needs to be destroyed when t goes out of scope, but
+ /// the innermost T needs to be destroyed when the expr has been evaluated.
+ Expr *RemoveOutermostTemporaryBinding(Expr *E);
+
+ /// InitializationKind - Represents which kind of C++ initialization
+ /// [dcl.init] a routine is to perform.
+ enum InitializationKind {
+ IK_Direct, ///< Direct initialization
+ IK_Copy, ///< Copy initialization
+ IK_Default ///< Default initialization
+ };
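+
+ // Illustrative sketch of the three kinds: "int x(1);" is direct
+ // initialization, "int x = 1;" is copy initialization, and "T x;" for a
+ // class type T is default initialization.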
+
+ CXXConstructorDecl *
+ PerformInitializationByConstructor(QualType ClassType,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName InitEntity,
+ InitializationKind Kind);
+
+ /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+ virtual OwningExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc,
+ TypeTy *Ty,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc,
+ ExprArg E,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXTypeid - Parse typeid( something ).
+ virtual OwningExprResult ActOnCXXTypeid(SourceLocation OpLoc,
+ SourceLocation LParenLoc, bool isType,
+ void *TyOrExpr,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXThis - Parse 'this' pointer.
+ virtual OwningExprResult ActOnCXXThis(SourceLocation ThisLoc);
+
+ /// ActOnCXXBoolLiteral - Parse {true,false} literals.
+ virtual OwningExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc,
+ tok::TokenKind Kind);
+
+ /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
+ virtual OwningExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
+
+ /// ActOnCXXThrow - Parse throw expressions.
+ virtual OwningExprResult ActOnCXXThrow(SourceLocation OpLoc,
+ ExprArg expr);
+ bool CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E);
+
+ /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
+ /// Can be interpreted either as function-style casting ("int(x)")
+ /// or class type construction ("ClassType(x,y,z)")
+ /// or creation of a value-initialized type ("int()").
+ virtual OwningExprResult ActOnCXXTypeConstructExpr(SourceRange TypeRange,
+ TypeTy *TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXNew - Parsed a C++ 'new' expression.
+ virtual OwningExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ bool ParenTypeId, Declarator &D,
+ SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen);
+ OwningExprResult BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ bool ParenTypeId,
+ QualType AllocType,
+ SourceLocation TypeLoc,
+ SourceRange TypeRange,
+ ExprArg ArraySize,
+ SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen);
+
+ bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
+ SourceRange R);
+ bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
+ bool UseGlobal, QualType AllocType, bool IsArray,
+ Expr **PlaceArgs, unsigned NumPlaceArgs,
+ FunctionDecl *&OperatorNew,
+ FunctionDecl *&OperatorDelete);
+ bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
+ DeclarationName Name, Expr** Args,
+ unsigned NumArgs, DeclContext *Ctx,
+ bool AllowMissing, FunctionDecl *&Operator);
+ void DeclareGlobalNewDelete();
+ void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
+ QualType Argument);
+
+ /// ActOnCXXDelete - Parsed a C++ 'delete' expression
+ virtual OwningExprResult ActOnCXXDelete(SourceLocation StartLoc,
+ bool UseGlobal, bool ArrayForm,
+ ExprArg Operand);
+
+ /// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
+ /// C++ if/switch/while/for statement.
+ /// e.g: "if (int x = f()) {...}"
+ virtual OwningExprResult ActOnCXXConditionDeclarationExpr(Scope *S,
+ SourceLocation StartLoc,
+ Declarator &D,
+ SourceLocation EqualLoc,
+ ExprArg AssignExprVal);
+
+ /// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
+ /// pseudo-functions.
+ virtual OwningExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ SourceLocation LParen,
+ TypeTy *Ty,
+ SourceLocation RParen);
+
+ virtual OwningExprResult ActOnFinishFullExpr(ExprArg Expr);
+
+ bool RequireCompleteDeclContext(const CXXScopeSpec &SS);
+
+ DeclContext *computeDeclContext(const CXXScopeSpec &SS);
+ bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
+ CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
+ bool isUnknownSpecialization(const CXXScopeSpec &SS);
+
+ /// ActOnCXXGlobalScopeSpecifier - Return the object that represents the
+ /// global scope ('::').
+ virtual CXXScopeTy *ActOnCXXGlobalScopeSpecifier(Scope *S,
+ SourceLocation CCLoc);
+
+ /// ActOnCXXNestedNameSpecifier - Called during parsing of a
+ /// nested-name-specifier. e.g. for "foo::bar::" we parsed "foo::" and now
+ /// we want to resolve "bar::". 'SS' is empty or the previously parsed
+ /// nested-name part ("foo::"), 'IdLoc' is the source location of 'bar',
+ /// 'CCLoc' is the location of '::' and 'II' is the identifier for 'bar'.
+ /// Returns a CXXScopeTy* object representing the C++ scope.
+ virtual CXXScopeTy *ActOnCXXNestedNameSpecifier(Scope *S,
+ const CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ SourceLocation CCLoc,
+ IdentifierInfo &II);
+
+ /// ActOnCXXNestedNameSpecifier - Called during parsing of a
+ /// nested-name-specifier that involves a template-id, e.g.,
+ /// "foo::bar<int, float>::", and now we need to build a scope
+ /// specifier. \p SS is empty or the previously parsed nested-name
+ /// part ("foo::"), \p Type is the already-parsed class template
+ /// specialization (or other template-id that names a type), \p
+ /// TypeRange is the source range where the type is located, and \p
+ /// CCLoc is the location of the trailing '::'.
+ virtual CXXScopeTy *ActOnCXXNestedNameSpecifier(Scope *S,
+ const CXXScopeSpec &SS,
+ TypeTy *Type,
+ SourceRange TypeRange,
+ SourceLocation CCLoc);
+
+ /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
+ /// scope or nested-name-specifier) is parsed, part of a declarator-id.
+ /// After this method is called, according to [C++ 3.4.3p3], names should be
+ /// looked up in the declarator-id's scope, until the declarator is parsed and
+ /// ActOnCXXExitDeclaratorScope is called.
+ /// The 'SS' should be a non-empty valid CXXScopeSpec.
+ virtual void ActOnCXXEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
+
+ /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
+ /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
+ /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
+ /// Used to indicate that names should revert to being looked up in the
+ /// defining scope.
+ virtual void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
+
+ // ParseObjCStringLiteral - Parse Objective-C string literals.
+ virtual ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
+ ExprTy **Strings,
+ unsigned NumStrings);
+ virtual ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ TypeTy *Ty,
+ SourceLocation RParenLoc);
+
+ // ParseObjCSelectorExpression - Build selector expression for @selector
+ virtual ExprResult ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ // ParseObjCProtocolExpression - Build protocol expression for @protocol
+ virtual ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Declarations
+ //
+ virtual DeclPtrTy ActOnStartLinkageSpecification(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ const char *Lang,
+ unsigned StrSize,
+ SourceLocation LBraceLoc);
+ virtual DeclPtrTy ActOnFinishLinkageSpecification(Scope *S,
+ DeclPtrTy LinkageSpec,
+ SourceLocation RBraceLoc);
+
+
+ //===--------------------------------------------------------------------===//
+ // C++ Classes
+ //
+ virtual bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
+ const CXXScopeSpec *SS);
+
+ virtual DeclPtrTy ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
+ Declarator &D,
+ ExprTy *BitfieldWidth,
+ ExprTy *Init,
+ bool Deleted = false);
+
+ virtual MemInitResult ActOnMemInitializer(DeclPtrTy ConstructorD,
+ Scope *S,
+ IdentifierInfo *MemberOrBase,
+ SourceLocation IdLoc,
+ SourceLocation LParenLoc,
+ ExprTy **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc);
+
+ void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
+
+ virtual void ActOnMemInitializers(DeclPtrTy ConstructorDecl,
+ SourceLocation ColonLoc,
+ MemInitTy **MemInits, unsigned NumMemInits);
+
+ virtual void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
+ DeclPtrTy TagDecl,
+ SourceLocation LBrac,
+ SourceLocation RBrac);
+
+ virtual void ActOnReenterTemplateScope(Scope *S, DeclPtrTy Template);
+ virtual void ActOnStartDelayedCXXMethodDeclaration(Scope *S,
+ DeclPtrTy Method);
+ virtual void ActOnDelayedCXXMethodParameter(Scope *S, DeclPtrTy Param);
+ virtual void ActOnFinishDelayedCXXMethodDeclaration(Scope *S,
+ DeclPtrTy Method);
+
+ virtual DeclPtrTy ActOnStaticAssertDeclaration(SourceLocation AssertLoc,
+ ExprArg AssertExpr,
+ ExprArg AssertMessageExpr);
+
+ virtual bool ActOnFriendDecl(Scope *S, SourceLocation FriendLoc,
+ DeclPtrTy Dcl);
+
+ QualType CheckConstructorDeclarator(Declarator &D, QualType R,
+ FunctionDecl::StorageClass& SC);
+ void CheckConstructor(CXXConstructorDecl *Constructor);
+ QualType CheckDestructorDeclarator(Declarator &D,
+ FunctionDecl::StorageClass& SC);
+ void CheckConversionDeclarator(Declarator &D, QualType &R,
+ FunctionDecl::StorageClass& SC);
+ DeclPtrTy ActOnConversionDeclarator(CXXConversionDecl *Conversion);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Derived Classes
+ //
+
+ /// ActOnBaseSpecifier - Parsed a base specifier
+ CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ QualType BaseType,
+ SourceLocation BaseLoc);
+ virtual BaseResult ActOnBaseSpecifier(DeclPtrTy classdecl,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeTy *basetype,
+ SourceLocation BaseLoc);
+
+ bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
+ unsigned NumBases);
+ virtual void ActOnBaseSpecifiers(DeclPtrTy ClassDecl, BaseTy **Bases,
+ unsigned NumBases);
+
+ bool IsDerivedFrom(QualType Derived, QualType Base);
+ bool IsDerivedFrom(QualType Derived, QualType Base, BasePaths &Paths);
+ bool LookupInBases(CXXRecordDecl *Class, const MemberLookupCriteria& Criteria,
+ BasePaths &Paths);
+ bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ SourceLocation Loc, SourceRange Range);
+ bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ unsigned AmbigiousBaseConvID,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName Name);
+
+ std::string getAmbiguousPathsDisplayString(BasePaths &Paths);
+
+ /// CheckOverridingFunctionReturnType - Checks whether the return types are
+ /// covariant, according to C++ [class.virtual]p5.
+ bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
+
+ //===--------------------------------------------------------------------===//
+ // C++ Access Control
+ //
+
+ bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
+ NamedDecl *PrevMemberDecl,
+ AccessSpecifier LexicalAS);
+
+ bool CheckBaseClassAccess(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ BasePaths& Paths, SourceLocation AccessLoc,
+ DeclarationName Name);
+
+
+ enum AbstractDiagSelID {
+ AbstractNone = -1,
+ AbstractReturnType,
+ AbstractParamType,
+ AbstractVariableType,
+ AbstractFieldType
+ };
+
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
+ AbstractDiagSelID SelID = AbstractNone,
+ const CXXRecordDecl *CurrentRD = 0);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Overloaded Operators [C++ 13.5]
+ //
+
+ bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Templates [C++ 14]
+ //
+ virtual TemplateNameKind isTemplateName(const IdentifierInfo &II, Scope *S,
+ TemplateTy &Template,
+ const CXXScopeSpec *SS = 0);
+ bool DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
+ TemplateDecl *AdjustDeclIfTemplate(DeclPtrTy &Decl);
+
+ virtual DeclPtrTy ActOnTypeParameter(Scope *S, bool Typename,
+ SourceLocation KeyLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth, unsigned Position);
+ virtual void ActOnTypeParameterDefault(DeclPtrTy TypeParam,
+ SourceLocation EqualLoc,
+ SourceLocation DefaultLoc,
+ TypeTy *Default);
+
+ QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
+ virtual DeclPtrTy ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+ unsigned Depth,
+ unsigned Position);
+ virtual void ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParam,
+ SourceLocation EqualLoc,
+ ExprArg Default);
+ virtual DeclPtrTy ActOnTemplateTemplateParameter(Scope *S,
+ SourceLocation TmpLoc,
+ TemplateParamsTy *Params,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth,
+ unsigned Position);
+ virtual void ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParam,
+ SourceLocation EqualLoc,
+ ExprArg Default);
+
+ virtual TemplateParamsTy *
+ ActOnTemplateParameterList(unsigned Depth,
+ SourceLocation ExportLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ DeclPtrTy *Params, unsigned NumParams,
+ SourceLocation RAngleLoc);
+ bool CheckTemplateParameterList(TemplateParameterList *NewParams,
+ TemplateParameterList *OldParams);
+
+ virtual DeclResult
+ ActOnClassTemplate(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc, const CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists,
+ AccessSpecifier AS);
+
+ QualType CheckTemplateIdType(TemplateName Template,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceLocation RAngleLoc);
+
+ virtual TypeResult
+ ActOnTemplateIdType(TemplateTy Template, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc);
+
+ virtual TemplateTy ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ const CXXScopeSpec &SS);
+
+ bool CheckClassTemplateSpecializationScope(ClassTemplateDecl *ClassTemplate,
+ ClassTemplateSpecializationDecl *PrevDecl,
+ SourceLocation TemplateNameLoc,
+ SourceRange ScopeSpecifierRange,
+ bool ExplicitInstantiation);
+
+ virtual DeclResult
+ ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists);
+
+ virtual DeclResult
+ ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr);
+
+ virtual DeclResult
+ ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr);
+
+ bool CheckTemplateArgumentList(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceLocation RAngleLoc,
+ llvm::SmallVectorImpl<TemplateArgument> &Converted);
+
+ bool CheckTemplateArgument(TemplateTypeParmDecl *Param, QualType Arg,
+ SourceLocation ArgLoc);
+ bool CheckTemplateArgumentAddressOfObjectOrFunction(Expr *Arg,
+ NamedDecl *&Entity);
+ bool CheckTemplateArgumentPointerToMember(Expr *Arg, NamedDecl *&Member);
+ bool CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
+ QualType InstantiatedParamType, Expr *&Arg,
+ llvm::SmallVectorImpl<TemplateArgument> *Converted = 0);
+ bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, DeclRefExpr *Arg);
+ bool TemplateParameterListsAreEqual(TemplateParameterList *New,
+ TemplateParameterList *Old,
+ bool Complain,
+ bool IsTemplateTemplateParm = false,
+ SourceLocation TemplateArgLoc
+ = SourceLocation());
+
+ bool CheckTemplateDeclScope(Scope *S,
+ MultiTemplateParamsArg &TemplateParameterLists);
+
+ /// \brief Called when the parser has parsed a C++ typename
+ /// specifier, e.g., "typename T::type".
+ ///
+ /// \param TypenameLoc the location of the 'typename' keyword
+ /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
+ /// \param II the identifier we're retrieving (e.g., 'type' in the example).
+ /// \param IdLoc the location of the identifier.
+ virtual TypeResult
+ ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
+ const IdentifierInfo &II, SourceLocation IdLoc);
+
+ /// \brief Called when the parser has parsed a C++ typename
+ /// specifier that ends in a template-id, e.g.,
+ /// "typename MetaFun::template apply<T1, T2>".
+ ///
+ /// \param TypenameLoc the location of the 'typename' keyword
+ /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
+ /// \param TemplateLoc the location of the 'template' keyword, if any.
+ /// \param Ty the type that the typename specifier refers to.
+ virtual TypeResult
+ ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
+ SourceLocation TemplateLoc, TypeTy *Ty);
+
+ QualType CheckTypenameType(NestedNameSpecifier *NNS,
+ const IdentifierInfo &II,
+ SourceRange Range);
+
+ bool DeduceTemplateArguments(QualType Param, QualType Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced);
+ bool DeduceTemplateArguments(const TemplateArgument &Param,
+ const TemplateArgument &Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced);
+ bool DeduceTemplateArguments(const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced);
+ bool DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Template Instantiation
+ //
+
+ const TemplateArgumentList &getTemplateInstantiationArgs(NamedDecl *D);
+
+ /// \brief A template instantiation that is currently in progress.
+ struct ActiveTemplateInstantiation {
+ /// \brief The kind of template instantiation we are performing
+ enum {
+ /// We are instantiating a template declaration. The entity is
+ /// the declaration we're instantiating (e.g., a CXXRecordDecl).
+ TemplateInstantiation,
+
+ /// We are instantiating a default argument for a template
+ /// parameter. The Entity is the template, and
+ /// TemplateArgs/NumTemplateArguments provides the template
+ /// arguments as specified.
+ DefaultTemplateArgumentInstantiation
+ } Kind;
+
+ /// \brief The point of instantiation within the source code.
+ SourceLocation PointOfInstantiation;
+
+ /// \brief The entity that is being instantiated.
+ uintptr_t Entity;
+
+ /// \brief If this is the instantiation of a default template
+ /// argument, the list of template arguments.
+ const TemplateArgument *TemplateArgs;
+
+ /// \brief The number of template arguments in TemplateArgs.
+ unsigned NumTemplateArgs;
+
+ /// \brief The source range that covers the construct that caused
+ /// the instantiation, e.g., the template-id that causes a class
+ /// template instantiation.
+ SourceRange InstantiationRange;
+
+ friend bool operator==(const ActiveTemplateInstantiation &X,
+ const ActiveTemplateInstantiation &Y) {
+ if (X.Kind != Y.Kind)
+ return false;
+
+ if (X.Entity != Y.Entity)
+ return false;
+
+ switch (X.Kind) {
+ case TemplateInstantiation:
+ return true;
+
+ case DefaultTemplateArgumentInstantiation:
+ return X.TemplateArgs == Y.TemplateArgs;
+ }
+
+ return true;
+ }
+
+ friend bool operator!=(const ActiveTemplateInstantiation &X,
+ const ActiveTemplateInstantiation &Y) {
+ return !(X == Y);
+ }
+ };
+
+ /// \brief List of active template instantiations.
+ ///
+ /// This vector is treated as a stack. As one template instantiation
+ /// requires another template instantiation, additional
+ /// instantiations are pushed onto the stack up to a
+ /// user-configurable limit LangOptions::InstantiationDepth.
+ llvm::SmallVector<ActiveTemplateInstantiation, 16>
+ ActiveTemplateInstantiations;
+
+ /// \brief The last template from which a template instantiation
+ /// error or warning was produced.
+ ///
+ /// This value is used to suppress printing of redundant template
+ /// instantiation backtraces when there are multiple errors in the
+ /// same instantiation. FIXME: Does this belong in Sema? It's tough
+ /// to implement it anywhere else.
+ ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
+
+ /// \brief A stack object to be created when performing template
+ /// instantiation.
+ ///
+ /// Construction of an object of type \c InstantiatingTemplate
+ /// pushes the current instantiation onto the stack of active
+ /// instantiations. If the size of this stack exceeds the maximum
+ /// number of recursive template instantiations, construction
+ /// produces an error and the object evaluates to true.
+ ///
+ /// Destruction of this object will pop the named instantiation off
+ /// the stack.
+ struct InstantiatingTemplate {
+ /// \brief Note that we are instantiating a class template,
+ /// function template, or a member thereof.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ Decl *Entity,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we are instantiating a default argument in a
+ /// template-id.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we have finished instantiating this template.
+ void Clear();
+
+ ~InstantiatingTemplate() { Clear(); }
+
+ /// \brief Determines whether we have exceeded the maximum
+ /// number of recursive template instantiations.
+ operator bool() const { return Invalid; }
+
+ private:
+ Sema &SemaRef;
+ bool Invalid;
+
+ bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
+ SourceRange InstantiationRange);
+
+ InstantiatingTemplate(const InstantiatingTemplate&); // not implemented
+
+ InstantiatingTemplate&
+ operator=(const InstantiatingTemplate&); // not implemented
+ };
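+
+ /// A typical usage sketch (hypothetical caller, following the semantics
+ /// described above):
+ /// \code
+ /// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
+ /// if (Inst) // too many recursive instantiations; error already emitted
+ /// return true;
+ /// \endcode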
+
+ void PrintInstantiationStack();
+
+ /// \brief A stack-allocated class that identifies which local
+ /// variable declaration instantiations are present in this scope.
+ ///
+ /// A new instance of this class type will be created whenever we
+ /// instantiate a new function declaration, which will have its own
+ /// set of parameter declarations.
+ class LocalInstantiationScope {
+ /// \brief Reference to the semantic analysis that is performing
+ /// this template instantiation.
+ Sema &SemaRef;
+
+ /// \brief A mapping from local declarations that occur
+ /// within a template to their instantiations.
+ ///
+ /// This mapping is used during instantiation to keep track of,
+ /// e.g., function parameter and variable declarations. For example,
+ /// given:
+ ///
+ /// \code
+ /// template<typename T> T add(T x, T y) { return x + y; }
+ /// \endcode
+ ///
+ /// when we instantiate add<int>, we will introduce a mapping from
+ /// the ParmVarDecl for 'x' that occurs in the template to the
+ /// instantiated ParmVarDecl for 'x'.
+ llvm::DenseMap<const Decl *, Decl *> LocalDecls;
+
+ /// \brief The outer scope, which contains local variable
+ /// definitions from some other instantiation (not relevant
+ /// to this particular scope).
+ LocalInstantiationScope *Outer;
+
+ // This class is non-copyable
+ LocalInstantiationScope(const LocalInstantiationScope &);
+ LocalInstantiationScope &operator=(const LocalInstantiationScope &);
+
+ public:
+ LocalInstantiationScope(Sema &SemaRef)
+ : SemaRef(SemaRef), Outer(SemaRef.CurrentInstantiationScope) {
+ SemaRef.CurrentInstantiationScope = this;
+ }
+
+ ~LocalInstantiationScope() {
+ SemaRef.CurrentInstantiationScope = Outer;
+ }
+
+ Decl *getInstantiationOf(const Decl *D) {
+ Decl *Result = LocalDecls[D];
+ assert(Result && "declaration was not instantiated in this scope!");
+ return Result;
+ }
+
+ VarDecl *getInstantiationOf(const VarDecl *Var) {
+ return cast<VarDecl>(getInstantiationOf(cast<Decl>(Var)));
+ }
+
+ ParmVarDecl *getInstantiationOf(const ParmVarDecl *Var) {
+ return cast<ParmVarDecl>(getInstantiationOf(cast<Decl>(Var)));
+ }
+
+ void InstantiatedLocal(const Decl *D, Decl *Inst) {
+ Decl *&Stored = LocalDecls[D];
+ assert(!Stored && "Already instantiated this local");
+ Stored = Inst;
+ }
+ };
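+
+ /// A minimal usage sketch (hypothetical names): while instantiating a
+ /// function template, map each original parameter to its instantiation and
+ /// retrieve it later when instantiating the body.
+ /// \code
+ /// LocalInstantiationScope Scope(*this);
+ /// Scope.InstantiatedLocal(OldParm, NewParm);
+ /// ParmVarDecl *P = Scope.getInstantiationOf(OldParm);
+ /// \endcode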
+
+ /// \brief The current instantiation scope used to store local
+ /// variables.
+ LocalInstantiationScope *CurrentInstantiationScope;
+
+ QualType InstantiateType(QualType T, const TemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity);
+
+ OwningExprResult InstantiateExpr(Expr *E,
+ const TemplateArgumentList &TemplateArgs);
+
+ OwningStmtResult InstantiateStmt(Stmt *S,
+ const TemplateArgumentList &TemplateArgs);
+ OwningStmtResult InstantiateCompoundStmt(CompoundStmt *S,
+ const TemplateArgumentList &TemplateArgs,
+ bool isStmtExpr);
+
+ Decl *InstantiateDecl(Decl *D, DeclContext *Owner,
+ const TemplateArgumentList &TemplateArgs);
+
+ bool
+ InstantiateBaseSpecifiers(CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const TemplateArgumentList &TemplateArgs);
+
+ bool
+ InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
+ const TemplateArgumentList &TemplateArgs,
+ bool ExplicitInstantiation);
+
+ bool
+ InstantiateClassTemplateSpecialization(
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ bool ExplicitInstantiation);
+
+ void InstantiateClassMembers(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ const TemplateArgumentList &TemplateArgs);
+
+ void InstantiateClassTemplateSpecializationMembers(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec);
+
+ NestedNameSpecifier *
+ InstantiateNestedNameSpecifier(NestedNameSpecifier *NNS,
+ SourceRange Range,
+ const TemplateArgumentList &TemplateArgs);
+
+ TemplateName
+ InstantiateTemplateName(TemplateName Name, SourceLocation Loc,
+ const TemplateArgumentList &TemplateArgs);
+
+ void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
+ FunctionDecl *Function);
+ void InstantiateVariableDefinition(VarDecl *Var);
+
+ NamedDecl *InstantiateCurrentDeclRef(NamedDecl *D);
+
+ // Simple function for cloning expressions.
+ template<typename T>
+ OwningExprResult Clone(T *E) {
+ assert(!E->isValueDependent() && !E->isTypeDependent() &&
+ "expression is value or type dependent!");
+ return Owned(E->Clone(Context));
+ }
+
+ // Objective-C declarations.
+ virtual DeclPtrTy ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList);
+
+ virtual DeclPtrTy ActOnCompatiblityAlias(
+ SourceLocation AtCompatibilityAliasLoc,
+ IdentifierInfo *AliasName, SourceLocation AliasLocation,
+ IdentifierInfo *ClassName, SourceLocation ClassLocation);
+
+ void CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName,
+ SourceLocation &PLoc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList);
+
+ virtual DeclPtrTy ActOnStartProtocolInterface(
+ SourceLocation AtProtoInterfaceLoc,
+ IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
+ const DeclPtrTy *ProtoRefNames, unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList);
+
+ virtual DeclPtrTy ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *CategoryName,
+ SourceLocation CategoryLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc);
+
+ virtual DeclPtrTy ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc);
+
+ virtual DeclPtrTy ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *CatName,
+ SourceLocation CatLoc);
+
+ virtual DeclPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
+ IdentifierInfo **IdentList,
+ unsigned NumElts);
+
+ virtual DeclPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
+ const IdentifierLocPair *IdentList,
+ unsigned NumElts,
+ AttributeList *attrList);
+
+ virtual void FindProtocolDeclaration(bool WarnOnDeclarations,
+ const IdentifierLocPair *ProtocolId,
+ unsigned NumProtocols,
+ llvm::SmallVectorImpl<DeclPtrTy> &Protocols);
+
+ /// Ensure attributes are consistent with type.
+ /// \param [in, out] Attributes The attributes to check; they will
+ /// be modified to be consistent with \arg PropertyTy.
+ void CheckObjCPropertyAttributes(QualType PropertyTy,
+ SourceLocation Loc,
+ unsigned &Attributes);
+ void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *DC);
+ void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *Name);
+ void ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl);
+
+ void MergeProtocolPropertiesIntoClass(Decl *CDecl,
+ DeclPtrTy MergeProtocols);
+
+ void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID);
+
+ void MergeOneProtocolPropertiesIntoClass(Decl *CDecl,
+ ObjCProtocolDecl *PDecl);
+
+ virtual void ActOnAtEnd(SourceLocation AtEndLoc, DeclPtrTy classDecl,
+ DeclPtrTy *allMethods = 0, unsigned allNum = 0,
+ DeclPtrTy *allProperties = 0, unsigned pNum = 0,
+ DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
+
+ virtual DeclPtrTy ActOnProperty(Scope *S, SourceLocation AtLoc,
+ FieldDeclarator &FD, ObjCDeclSpec &ODS,
+ Selector GetterSel, Selector SetterSel,
+ DeclPtrTy ClassCategory,
+ bool *OverridingProperty,
+ tok::ObjCKeywordKind MethodImplKind);
+
+ virtual DeclPtrTy ActOnPropertyImplDecl(SourceLocation AtLoc,
+ SourceLocation PropertyLoc,
+ bool ImplKind, DeclPtrTy ClassImplDecl,
+ IdentifierInfo *PropertyId,
+ IdentifierInfo *PropertyIvar);
+
+ virtual DeclPtrTy ActOnMethodDeclaration(
+ SourceLocation BeginLoc, // location of the + or -.
+ SourceLocation EndLoc, // location of the ; or {.
+ tok::TokenKind MethodType,
+ DeclPtrTy ClassDecl, ObjCDeclSpec &ReturnQT, TypeTy *ReturnType,
+ Selector Sel,
+ // optional arguments. The number of types/arguments is obtained
+ // from Sel.getNumArgs().
+ ObjCArgInfo *ArgInfo,
+ llvm::SmallVectorImpl<Declarator> &Cdecls,
+ AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
+ bool isVariadic = false);
+
+ // Helper method for ActOnClassMethod/ActOnInstanceMethod.
+ // Will search "local" class/category implementations for a method decl.
+ // Will also search in the class's root looking for an instance method.
+ // Returns 0 if no method is found.
+ ObjCMethodDecl *LookupPrivateClassMethod(Selector Sel,
+ ObjCInterfaceDecl *CDecl);
+ ObjCMethodDecl *LookupPrivateInstanceMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl);
+
+ virtual OwningExprResult ActOnClassPropertyRefExpr(
+ IdentifierInfo &receiverName,
+ IdentifierInfo &propertyName,
+ SourceLocation &receiverNameLoc,
+ SourceLocation &propertyNameLoc);
+
+ // ActOnClassMessage - used for both unary and keyword messages.
+ // ArgExprs is optional - if it is present, the number of expressions
+ // is obtained from NumArgs.
+ virtual ExprResult ActOnClassMessage(
+ Scope *S,
+ IdentifierInfo *receivingClassName, Selector Sel, SourceLocation lbrac,
+ SourceLocation receiverLoc, SourceLocation selectorLoc, SourceLocation rbrac,
+ ExprTy **ArgExprs, unsigned NumArgs);
+
+ // ActOnInstanceMessage - used for both unary and keyword messages.
+ // ArgExprs is optional - if it is present, the number of expressions
+ // is obtained from NumArgs.
+ virtual ExprResult ActOnInstanceMessage(
+ ExprTy *receiver, Selector Sel,
+ SourceLocation lbrac, SourceLocation receiverLoc, SourceLocation rbrac,
+ ExprTy **ArgExprs, unsigned NumArgs);
+
+ /// ActOnPragmaPack - Called on well-formed '#pragma pack(...)'.
+ virtual void ActOnPragmaPack(PragmaPackKind Kind,
+ IdentifierInfo *Name,
+ ExprTy *Alignment,
+ SourceLocation PragmaLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ /// ActOnPragmaUnused - Called on well-formed '#pragma unused'.
+ virtual void ActOnPragmaUnused(ExprTy **Exprs, unsigned NumExprs,
+ SourceLocation PragmaLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ /// getPragmaPackAlignment() - Return the current alignment as specified by
+ /// the current #pragma pack directive, or 0 if none is currently active.
+ unsigned getPragmaPackAlignment() const;
+
+ /// FreePackedContext - Deallocate and null out PackContext.
+ void FreePackedContext();
+
+ /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
+ /// cast. If there is already an implicit cast, merge into the existing one.
+ /// If isLvalue, the result of the cast is an lvalue.
+ void ImpCastExprToType(Expr *&Expr, QualType Type, bool isLvalue = false);
+
+ // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
+ // functions and arrays to their respective pointers (C99 6.3.2.1).
+ Expr *UsualUnaryConversions(Expr *&expr);
+
+ // DefaultFunctionArrayConversion - converts functions and arrays
+ // to their respective pointers (C99 6.3.2.1).
+ void DefaultFunctionArrayConversion(Expr *&expr);
+
+ // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
+ // do not have a prototype. Integer promotions are performed on each
+ // argument, and arguments that have type float are promoted to double.
+ void DefaultArgumentPromotion(Expr *&Expr);
+
+ // Used for emitting the right warning by DefaultVariadicArgumentPromotion
+ enum VariadicCallType {
+ VariadicFunction,
+ VariadicBlock,
+ VariadicMethod
+ };
+
+ // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
+ // will warn if the resulting type is not a POD type.
+ bool DefaultVariadicArgumentPromotion(Expr *&Expr, VariadicCallType CT);
+
+ // UsualArithmeticConversions - performs the UsualUnaryConversions on its
+ // operands and then handles various conversions that are common to binary
+ // operators (C99 6.3.1.8). If either operand is not arithmetic, this
+ // routine returns the first non-arithmetic type found. The client is
+ // responsible for emitting appropriate error diagnostics.
+ QualType UsualArithmeticConversions(Expr *&lExpr, Expr *&rExpr,
+ bool isCompAssign = false);
+
+ /// UsualArithmeticConversionsType - handles the various conversions
+ /// that are common to binary operators (C99 6.3.1.8, C++ [expr]p9)
+ /// and returns the result type of that conversion.
+ QualType UsualArithmeticConversionsType(QualType lhs, QualType rhs);
+
+
+ /// AssignConvertType - All of the 'assignment' semantic checks return this
+ /// enum to indicate whether the assignment was allowed. These checks are
+ /// done for simple assignments, as well as initialization, return from
+ /// function, argument passing, etc. The query is phrased in terms of a
+ /// source and destination type.
+ enum AssignConvertType {
+ /// Compatible - the types are compatible according to the standard.
+ Compatible,
+
+ /// PointerToInt - The assignment converts a pointer to an int, which we
+ /// accept as an extension.
+ PointerToInt,
+
+ /// IntToPointer - The assignment converts an int to a pointer, which we
+ /// accept as an extension.
+ IntToPointer,
+
+ /// FunctionVoidPointer - The assignment is between a function pointer and
+ /// void*, which the standard doesn't allow, but we accept as an extension.
+ FunctionVoidPointer,
+
+ /// IncompatiblePointer - The assignment is between two pointer types that
+ /// are not compatible, but we accept them as an extension.
+ IncompatiblePointer,
+
+ /// IncompatiblePointerSign - The assignment is between two pointer types which
+ /// point to integers which differ in sign, but are otherwise identical.
+ /// This is a subset of the above, but broken out because it's by far the most
+ /// common case of incompatible pointers.
+ IncompatiblePointerSign,
+
+ /// CompatiblePointerDiscardsQualifiers - The assignment discards
+ /// c/v/r qualifiers, which we accept as an extension.
+ CompatiblePointerDiscardsQualifiers,
+
+ /// IncompatibleVectors - The assignment is between two vector types that
+ /// have the same size, which we accept as an extension.
+ IncompatibleVectors,
+
+ /// IntToBlockPointer - The assignment converts an int to a block
+ /// pointer. We disallow this.
+ IntToBlockPointer,
+
+ /// IncompatibleBlockPointer - The assignment is between two block
+ /// pointer types that are not compatible.
+ IncompatibleBlockPointer,
+
+ /// IncompatibleObjCQualifiedId - The assignment is between a qualified
+ /// id type and something else (that is incompatible with it). For example,
+ /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
+ IncompatibleObjCQualifiedId,
+
+ /// Incompatible - We reject this conversion outright, it is invalid to
+ /// represent it in the AST.
+ Incompatible
+ };
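+
+ // Illustrative mapping (sketch): assigning an int to a pointer ("int *p = 42;")
+ // is IntToPointer, assigning a pointer to an int ("int n = p;") is
+ // PointerToInt, and assigning a "const char *" to a "char *" discards a
+ // qualifier (CompatiblePointerDiscardsQualifiers).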
+
+ /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
+ /// assignment conversion type specified by ConvTy. This returns true if the
+ /// conversion was invalid or false if the conversion was accepted.
+ bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
+ SourceLocation Loc,
+ QualType DstType, QualType SrcType,
+ Expr *SrcExpr, const char *Flavor);
+
+ /// CheckAssignmentConstraints - Perform type checking for assignment,
+ /// argument passing, variable initialization, and function return values.
+ /// This routine is only used by the following two methods. C99 6.5.16.
+ AssignConvertType CheckAssignmentConstraints(QualType lhs, QualType rhs);
+
+ // CheckSingleAssignmentConstraints - Currently used by
+ // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
+ // this routine performs the default function/array conversions.
+ AssignConvertType CheckSingleAssignmentConstraints(QualType lhs,
+ Expr *&rExpr);
+
+ /// \brief If the lhs type is a transparent union, check whether we
+ /// can initialize the transparent union with the given expression.
+ AssignConvertType CheckTransparentUnionArgumentConstraints(QualType lhs,
+ Expr *&rExpr);
+
+ // Helper function for CheckAssignmentConstraints (C99 6.5.16.1p1)
+ AssignConvertType CheckPointerTypesForAssignment(QualType lhsType,
+ QualType rhsType);
+
+ // Helper function for CheckAssignmentConstraints involving two
+ // block pointer types.
+ AssignConvertType CheckBlockPointerTypesForAssignment(QualType lhsType,
+ QualType rhsType);
+
+ bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
+
+ bool PerformImplicitConversion(Expr *&From, QualType ToType,
+ const char *Flavor,
+ bool AllowExplicit = false,
+ bool Elidable = false);
+ bool PerformImplicitConversion(Expr *&From, QualType ToType,
+ const ImplicitConversionSequence& ICS,
+ const char *Flavor);
+ bool PerformImplicitConversion(Expr *&From, QualType ToType,
+ const StandardConversionSequence& SCS,
+ const char *Flavor);
+
+ /// the following "Check" methods will return a valid/converted QualType
+ /// or a null QualType (indicating an error diagnostic was issued).
+
+ /// type checking binary operators (subroutines of CreateBuiltinBinOp).
+ QualType InvalidOperands(SourceLocation l, Expr *&lex, Expr *&rex);
+ QualType CheckPointerToMemberOperands( // C++ 5.5
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isIndirect);
+ QualType CheckMultiplyDivideOperands( // C99 6.5.5
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
+ QualType CheckRemainderOperands( // C99 6.5.5
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
+ QualType CheckAdditionOperands( // C99 6.5.6
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, QualType* CompLHSTy = 0);
+ QualType CheckSubtractionOperands( // C99 6.5.6
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, QualType* CompLHSTy = 0);
+ QualType CheckShiftOperands( // C99 6.5.7
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
+ QualType CheckCompareOperands( // C99 6.5.8/9
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc, bool isRelational);
+ QualType CheckBitwiseOperands( // C99 6.5.[10...12]
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false);
+ QualType CheckLogicalOperands( // C99 6.5.[13,14]
+ Expr *&lex, Expr *&rex, SourceLocation OpLoc);
+ // CheckAssignmentOperands is used for both simple and compound assignment.
+ // For simple assignment, pass both expressions and a null converted type.
+ // For compound assignment, pass both expressions and the converted type.
+ QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
+ Expr *lex, Expr *&rex, SourceLocation OpLoc, QualType convertedType);
+ QualType CheckCommaOperands( // C99 6.5.17
+ Expr *lex, Expr *&rex, SourceLocation OpLoc);
+ QualType CheckConditionalOperands( // C99 6.5.15
+ Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
+ QualType CXXCheckConditionalOperands( // C++ 5.16
+ Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
+ QualType FindCompositePointerType(Expr *&E1, Expr *&E2); // C++ 5.9
+
+ /// type checking for vector binary operators.
+ inline QualType CheckVectorOperands(SourceLocation l, Expr *&lex, Expr *&rex);
+ inline QualType CheckVectorCompareOperands(Expr *&lex, Expr *&rx,
+ SourceLocation l, bool isRel);
+
+ /// type checking unary operators (subroutines of ActOnUnaryOp).
+ /// C99 6.5.3.1, 6.5.3.2, 6.5.3.4
+ QualType CheckIncrementDecrementOperand(Expr *op, SourceLocation OpLoc,
+ bool isInc);
+ QualType CheckAddressOfOperand(Expr *op, SourceLocation OpLoc);
+ QualType CheckIndirectionOperand(Expr *op, SourceLocation OpLoc);
+ QualType CheckRealImagOperand(Expr *&Op, SourceLocation OpLoc, bool isReal);
+
+ /// type checking primary expressions.
+ QualType CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
+ IdentifierInfo &Comp, SourceLocation CmpLoc);
+
+ /// type checking declaration initializers (C99 6.7.8)
+
+ bool CheckInitializerTypes(Expr *&simpleInit_or_initList, QualType &declType,
+ SourceLocation InitLoc,DeclarationName InitEntity,
+ bool DirectInit);
+ bool CheckInitList(InitListExpr *&InitList, QualType &DeclType);
+ bool CheckForConstantInitializer(Expr *e, QualType t);
+
+ bool CheckValueInitialization(QualType Type, SourceLocation Loc);
+
+ // type checking C++ declaration initializers (C++ [dcl.init]).
+
+ /// ReferenceCompareResult - Expresses the result of comparing two
+ /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
+ /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
+ enum ReferenceCompareResult {
+ /// Ref_Incompatible - The two types are incompatible, so direct
+ /// reference binding is not possible.
+ Ref_Incompatible = 0,
+ /// Ref_Related - The two types are reference-related, which means
+ /// that their unqualified forms (T1 and T2) are either the same
+ /// or T1 is a base class of T2.
+ Ref_Related,
+ /// Ref_Compatible_With_Added_Qualification - The two types are
+ /// reference-compatible with added qualification, meaning that
+ /// they are reference-compatible and the qualifiers on T1 (cv1)
+ /// are greater than the qualifiers on T2 (cv2).
+ Ref_Compatible_With_Added_Qualification,
+ /// Ref_Compatible - The two types are reference-compatible and
+ /// have equivalent qualifiers (cv1 == cv2).
+ Ref_Compatible
+ };
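+
+ // Sketch of the categories: binding "const int &" to an "int" lvalue is
+ // Ref_Compatible_With_Added_Qualification, binding "int &" to an "int" lvalue
+ // is Ref_Compatible, and binding "int &" to a "double" lvalue is
+ // Ref_Incompatible.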
+
+ ReferenceCompareResult CompareReferenceRelationship(QualType T1, QualType T2,
+ bool& DerivedToBase);
+
+ bool CheckReferenceInit(Expr *&simpleInit_or_initList, QualType declType,
+ ImplicitConversionSequence *ICS = 0,
+ bool SuppressUserConversions = false,
+ bool AllowExplicit = false,
+ bool ForceRValue = false);
+
+ /// CheckCastTypes - Check type constraints for casting between types.
+ bool CheckCastTypes(SourceRange TyRange, QualType CastTy, Expr *&CastExpr);
+
+ // CheckVectorCast - check type constraints for vectors.
+ // Since vectors are an extension, there is no C standard reference for this.
+ // We allow casting between vectors and integer datatypes of the same size.
+ // Returns true if the cast is invalid.
+ bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty);
+
+ /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
+ /// \param Method - May be null.
+ /// \param [out] ReturnType - The return type of the send.
+ /// \return true iff there were any incompatible types.
+ bool CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs, Selector Sel,
+ ObjCMethodDecl *Method, bool isClassMessage,
+ SourceLocation lbrac, SourceLocation rbrac,
+ QualType &ReturnType);
+
+ /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
+ bool CheckCXXBooleanCondition(Expr *&CondExpr);
+
+ /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
+ /// the specified width and sign. If an overflow occurs, detect it and emit
+ /// the specified diagnostic.
+ void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
+ unsigned NewWidth, bool NewSign,
+ SourceLocation Loc, unsigned DiagID);
+
+ bool ObjCQualifiedIdTypesAreCompatible(QualType LHS, QualType RHS,
+ bool ForCompare);
+
+ /// Checks that the Objective-C declaration is declared in the global scope.
+ /// Emits an error and marks the declaration as invalid if it's not declared
+ /// in the global scope.
+ bool CheckObjCDeclScope(Decl *D);
+
+ void InitBuiltinVaListType();
+
+ /// VerifyIntegerConstantExpression - verifies that an expression is an ICE,
+ /// and reports the appropriate diagnostics. Returns false on success.
+ /// Can optionally return the value of the expression.
+ bool VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result = 0);
+
+ /// VerifyBitField - verifies that a bit field expression is an ICE and has
+ /// the correct width, and that the field type is valid.
+ /// Returns false on success.
+ bool VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
+ QualType FieldTy, const Expr *BitWidth);
+
+ //===--------------------------------------------------------------------===//
+ // Extra semantic analysis beyond the C type system
+private:
+ Action::OwningExprResult CheckFunctionCall(FunctionDecl *FDecl,
+ CallExpr *TheCall);
+
+ Action::OwningExprResult CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall);
+ SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
+ unsigned ByteNo) const;
+ bool CheckObjCString(Expr *Arg);
+ bool SemaBuiltinVAStart(CallExpr *TheCall);
+ bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
+ bool SemaBuiltinStackAddress(CallExpr *TheCall);
+
+public:
+ // Used by C++ template instantiation.
+ Action::OwningExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
+
+private:
+ bool SemaBuiltinPrefetch(CallExpr *TheCall);
+ bool SemaBuiltinObjectSize(CallExpr *TheCall);
+ bool SemaBuiltinLongjmp(CallExpr *TheCall);
+ bool SemaBuiltinAtomicOverloaded(CallExpr *TheCall);
+ bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg);
+ void CheckPrintfString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
+ const CallExpr *TheCall, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg);
+ void CheckNonNullArguments(const NonNullAttr *NonNull,
+ const CallExpr *TheCall);
+ void CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg);
+ void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc);
+ void CheckFloatComparison(SourceLocation loc, Expr* lex, Expr* rex);
+};
+
+
+//===--------------------------------------------------------------------===//
+// Typed version of Parser::ExprArg (smart pointer for wrapping Expr pointers).
+template <typename T>
+class ExprOwningPtr : public Action::ExprArg {
+public:
+ ExprOwningPtr(Sema *S, T *expr) : Action::ExprArg(*S, expr) {}
+
+ void reset(T* p) { Action::ExprArg::operator=(p); }
+ T* get() const { return static_cast<T*>(Action::ExprArg::get()); }
+ T* take() { return static_cast<T*>(Action::ExprArg::take()); }
+ T* release() { return take(); }
+
+ T& operator*() const { return *get(); }
+ T* operator->() const { return get(); }
+};
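+
+// Illustrative usage sketch (editorial example; CheckSomething and E are
+// hypothetical names): wrap a freshly built expression so it is destroyed
+// automatically unless ownership is explicitly taken:
+//   ExprOwningPtr<CallExpr> Call(this, E);
+//   if (CheckSomething(Call.get()))
+//     return ExprError();          // Call still owns E and will free it
+//   return Owned(Call.take());     // hand ownership back to the caller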
+
+} // end namespace clang
+
+#endif
diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp
new file mode 100644
index 0000000..bae69ac
--- /dev/null
+++ b/lib/Sema/SemaAccess.cpp
@@ -0,0 +1,124 @@
+//===---- SemaAccess.cpp - C++ Access Control -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ access control semantics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SemaInherit.h"
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+using namespace clang;
+
+/// SetMemberAccessSpecifier - Set the access specifier of a member.
+/// Returns true on error (when the previous member decl access specifier
+/// is different from the new member decl access specifier).
+bool Sema::SetMemberAccessSpecifier(NamedDecl *MemberDecl,
+ NamedDecl *PrevMemberDecl,
+ AccessSpecifier LexicalAS) {
+ if (!PrevMemberDecl) {
+ // Use the lexical access specifier.
+ MemberDecl->setAccess(LexicalAS);
+ return false;
+ }
+
+ // C++ [class.access.spec]p3: When a member is redeclared, its access
+ // specifier must be the same as in its initial declaration.
+ if (LexicalAS != AS_none && LexicalAS != PrevMemberDecl->getAccess()) {
+ Diag(MemberDecl->getLocation(),
+ diag::err_class_redeclared_with_different_access)
+ << MemberDecl << LexicalAS;
+ Diag(PrevMemberDecl->getLocation(), diag::note_previous_access_declaration)
+ << PrevMemberDecl << PrevMemberDecl->getAccess();
+ return true;
+ }
+
+ MemberDecl->setAccess(PrevMemberDecl->getAccess());
+ return false;
+}
+
+/// CheckBaseClassAccess - Check that a derived class can access its base class
+/// and report an error if it can't. [class.access.base]
+bool Sema::CheckBaseClassAccess(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ BasePaths& Paths, SourceLocation AccessLoc,
+ DeclarationName Name) {
+ Base = Context.getCanonicalType(Base).getUnqualifiedType();
+ assert(!Paths.isAmbiguous(Base) &&
+ "Can't check base class access if set of paths is ambiguous");
+ assert(Paths.isRecordingPaths() &&
+ "Can't check base class access without recorded paths");
+
+ if (!getLangOptions().AccessControl)
+ return false;
+
+ const CXXBaseSpecifier *InaccessibleBase = 0;
+
+ const CXXRecordDecl* CurrentClassDecl = 0;
+ if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(getCurFunctionDecl()))
+ CurrentClassDecl = MD->getParent();
+
+ for (BasePaths::paths_iterator Path = Paths.begin(), PathsEnd = Paths.end();
+ Path != PathsEnd; ++Path) {
+
+ bool FoundInaccessibleBase = false;
+
+ for (BasePath::const_iterator Element = Path->begin(),
+ ElementEnd = Path->end(); Element != ElementEnd; ++Element) {
+ const CXXBaseSpecifier *Base = Element->Base;
+
+ switch (Base->getAccessSpecifier()) {
+ default:
+ assert(0 && "invalid access specifier");
+ case AS_public:
+ // Nothing to do.
+ break;
+ case AS_private:
+ // FIXME: Check if the current function/class is a friend.
+ if (CurrentClassDecl != Element->Class)
+ FoundInaccessibleBase = true;
+ break;
+ case AS_protected:
+ // FIXME: Implement
+ break;
+ }
+
+ if (FoundInaccessibleBase) {
+ InaccessibleBase = Base;
+ break;
+ }
+ }
+
+ if (!FoundInaccessibleBase) {
+ // We found a path to the base, our work here is done.
+ InaccessibleBase = 0;
+ break;
+ }
+ }
+
+ if (InaccessibleBase) {
+ Diag(AccessLoc, InaccessibleBaseID)
+ << Derived << Base << Name;
+
+ AccessSpecifier AS = InaccessibleBase->getAccessSpecifierAsWritten();
+
+ // If there's no written access specifier, then the inheritance specifier
+ // is implicitly private.
+ if (AS == AS_none)
+ Diag(InaccessibleBase->getSourceRange().getBegin(),
+ diag::note_inheritance_implicitly_private_here);
+ else
+ Diag(InaccessibleBase->getSourceRange().getBegin(),
+ diag::note_inheritance_specifier_here) << AS;
+
+ return true;
+ }
+
+ return false;
+}
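+
+// Illustrative example (editorial sketch, not exhaustive): with access
+// control enabled, converting a derived pointer through a private base is
+// rejected via the InaccessibleBaseID diagnostic above:
+//   class Base {};
+//   class Derived : Base {};    // 'class' bases are implicitly private
+//   Base *B = new Derived();    // error: inaccessible base class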
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
new file mode 100644
index 0000000..1bf8444
--- /dev/null
+++ b/lib/Sema/SemaAttr.cpp
@@ -0,0 +1,211 @@
+//===--- SemaAttr.cpp - Semantic Analysis for Attributes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for non-trivial attributes and
+// pragmas.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/Expr.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Pragma Packed
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// PragmaPackStack - Simple class to wrap the stack used by #pragma
+ /// pack.
+ class PragmaPackStack {
+ typedef std::vector< std::pair<unsigned, IdentifierInfo*> > stack_ty;
+
+ /// Alignment - The current user specified alignment.
+ unsigned Alignment;
+
+ /// Stack - Entries in the #pragma pack stack, consisting of saved
+ /// alignments and optional names.
+ stack_ty Stack;
+
+ public:
+ PragmaPackStack() : Alignment(0) {}
+
+ void setAlignment(unsigned A) { Alignment = A; }
+ unsigned getAlignment() { return Alignment; }
+
+ /// push - Push the current alignment onto the stack, optionally
+ /// using the given \arg Name for the record, if non-zero.
+ void push(IdentifierInfo *Name) {
+ Stack.push_back(std::make_pair(Alignment, Name));
+ }
+
+ /// pop - Pop a record from the stack and restore the current
+ /// alignment to the previous value. If \arg Name is non-zero then
+ /// the first such named record is popped, otherwise the top record
+ /// is popped. Returns true if the pop succeeded.
+ bool pop(IdentifierInfo *Name);
+ };
+} // end anonymous namespace.
+
+bool PragmaPackStack::pop(IdentifierInfo *Name) {
+ if (Stack.empty())
+ return false;
+
+ // If no name was specified, just pop the top record.
+ if (!Name) {
+ Alignment = Stack.back().first;
+ Stack.pop_back();
+ return true;
+ }
+
+ // Otherwise, find the named record.
+ for (unsigned i = Stack.size(); i != 0; ) {
+ --i;
+ if (Stack[i].second == Name) {
+ // Found it, pop up to and including this record.
+ Alignment = Stack[i].first;
+ Stack.erase(Stack.begin() + i, Stack.end());
+ return true;
+ }
+ }
+
+ return false;
+}
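+
+// Illustrative sketch of the stack behavior above (editorial example):
+//   #pragma pack(push, r1)  // push("r1") saves the current alignment as r1
+//   #pragma pack(4)         // setAlignment(4)
+//   #pragma pack(pop, r1)   // pop("r1") restores the alignment saved for r1
+// A named pop erases the matching record and everything pushed after it;
+// an unnamed pop only restores and removes the top record.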
+
+
+/// FreePackedContext - Deallocate and null out PackContext.
+void Sema::FreePackedContext() {
+ delete static_cast<PragmaPackStack*>(PackContext);
+ PackContext = 0;
+}
+
+/// getPragmaPackAlignment() - Return the current alignment as specified by
+/// the current #pragma pack directive, or 0 if none is currently active.
+unsigned Sema::getPragmaPackAlignment() const {
+ if (PackContext)
+ return static_cast<PragmaPackStack*>(PackContext)->getAlignment();
+ return 0;
+}
+
+void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
+ ExprTy *alignment, SourceLocation PragmaLoc,
+ SourceLocation LParenLoc, SourceLocation RParenLoc) {
+ Expr *Alignment = static_cast<Expr *>(alignment);
+
+ // If specified then alignment must be a "small" power of two.
+ unsigned AlignmentVal = 0;
+ if (Alignment) {
+ llvm::APSInt Val;
+
+ // pack(0) is like pack(), which just works out since that is what
+ // we use 0 for in PackAttr.
+ if (!Alignment->isIntegerConstantExpr(Val, Context) ||
+ !(Val == 0 || Val.isPowerOf2()) ||
+ Val.getZExtValue() > 16) {
+ Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
+ Alignment->Destroy(Context);
+ return; // Ignore
+ }
+
+ AlignmentVal = (unsigned) Val.getZExtValue();
+ }
+
+ if (PackContext == 0)
+ PackContext = new PragmaPackStack();
+
+ PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
+
+ switch (Kind) {
+ case Action::PPK_Default: // pack([n])
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Action::PPK_Show: // pack(show)
+ // Show the current alignment, making sure to show the right value
+ // for the default.
+ AlignmentVal = Context->getAlignment();
+ // FIXME: This should come from the target.
+ if (AlignmentVal == 0)
+ AlignmentVal = 8;
+ Diag(PragmaLoc, diag::warn_pragma_pack_show) << AlignmentVal;
+ break;
+
+ case Action::PPK_Push: // pack(push [, id] [, n])
+ Context->push(Name);
+ // Set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Action::PPK_Pop: // pack(pop [, id] [, n])
+ // MSDN, C/C++ Preprocessor Reference > Pragma Directives > pack:
+ // "#pragma pack(pop, identifier, n) is undefined"
+ if (Alignment && Name)
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifer_and_alignment);
+
+ // Do the pop.
+ if (!Context->pop(Name)) {
+ // If a name was specified then failure indicates the name
+ // wasn't found. Otherwise failure indicates the stack was
+ // empty.
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_failed)
+ << (Name ? "no record matching name" : "stack empty");
+
+ // FIXME: Warn about popping named records as MSVC does.
+ } else {
+ // Pop succeeded, set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ }
+ break;
+
+ default:
+ assert(0 && "Invalid #pragma pack kind.");
+ }
+}
+
+void Sema::ActOnPragmaUnused(ExprTy **Exprs, unsigned NumExprs,
+ SourceLocation PragmaLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+
+ // Verify that all of the expressions are valid before
+ // modifying the attributes of any referenced decl.
+ Expr *ErrorExpr = 0;
+
+ for (unsigned i = 0; i < NumExprs; ++i) {
+ Expr *Ex = (Expr*) Exprs[i];
+ if (!isa<DeclRefExpr>(Ex)) {
+ ErrorExpr = Ex;
+ break;
+ }
+
+ Decl *d = cast<DeclRefExpr>(Ex)->getDecl();
+
+ if (!isa<VarDecl>(d) || !cast<VarDecl>(d)->hasLocalStorage()) {
+ ErrorExpr = Ex;
+ break;
+ }
+ }
+
+ // Delete the expressions if we encountered any error.
+ if (ErrorExpr) {
+ Diag(ErrorExpr->getLocStart(), diag::warn_pragma_unused_expected_localvar);
+ for (unsigned i = 0; i < NumExprs; ++i)
+ ((Expr*) Exprs[i])->Destroy(Context);
+ return;
+ }
+
+ // Otherwise, add the 'unused' attribute to each referenced declaration.
+ for (unsigned i = 0; i < NumExprs; ++i) {
+ DeclRefExpr *DR = (DeclRefExpr*) Exprs[i];
+ DR->getDecl()->addAttr(::new (Context) UnusedAttr());
+ DR->Destroy(Context);
+ }
+}
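+
+// Illustrative use of the pragma handled above (editorial sketch):
+//   void f(int x) {
+//     int y = 0;
+//   #pragma unused(x, y)
+//   }
+// Each operand must be a DeclRefExpr naming a variable with local storage
+// (locals or parameters); anything else produces the
+// warn_pragma_unused_expected_localvar diagnostic.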
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
new file mode 100644
index 0000000..11ac0bd
--- /dev/null
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -0,0 +1,312 @@
+//===--- SemaCXXScopeSpec.cpp - Semantic Analysis for C++ scope specifiers-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ semantic analysis for scope specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/Parse/DeclSpec.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+/// \brief Compute the DeclContext that is associated with the given
+/// scope specifier.
+DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return 0;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ if (NNS->isDependent()) {
+ // If this nested-name-specifier refers to the current
+ // instantiation, return its DeclContext.
+ if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
+ return Record;
+ else
+ return 0;
+ }
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ assert(false && "Dependent nested-name-specifier has no DeclContext");
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ return NNS->getAsNamespace();
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const TagType *Tag = NNS->getAsType()->getAsTagType();
+ assert(Tag && "Non-tag type in nested-name-specifier");
+ return Tag->getDecl();
+ } break;
+
+ case NestedNameSpecifier::Global:
+ return Context.getTranslationUnitDecl();
+ }
+
+ // Required to silence a GCC warning.
+ return 0;
+}
+
+bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return false;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ return NNS->isDependent();
+}
+
+/// \brief Determine whether this C++ scope specifier refers to an
+/// unknown specialization, i.e., a dependent type that is not the
+/// current instantiation.
+bool Sema::isUnknownSpecialization(const CXXScopeSpec &SS) {
+ if (!isDependentScopeSpecifier(SS))
+ return false;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ return getCurrentInstantiationOf(NNS) == 0;
+}
+
+/// \brief If the given nested name specifier refers to the current
+/// instantiation, return the declaration that corresponds to that
+/// current instantiation (C++0x [temp.dep.type]p1).
+///
+/// \param NNS a dependent nested name specifier.
+CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
+ assert(getLangOptions().CPlusPlus && "Only callable in C++");
+ assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
+
+ QualType T = QualType(NNS->getAsType(), 0);
+ // If the nested name specifier does not refer to a type, then it
+ // does not refer to the current instantiation.
+ if (T.isNull())
+ return 0;
+
+ T = Context.getCanonicalType(T);
+
+ for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getParent()) {
+ // If we've hit a namespace or the global scope, then the
+ // nested-name-specifier can't refer to the current instantiation.
+ if (Ctx->isFileContext())
+ return 0;
+
+ // Skip non-class contexts.
+ CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx);
+ if (!Record)
+ continue;
+
+ // If this record type is not dependent, it cannot be the current instantiation.
+ if (!Record->isDependentType())
+ return 0;
+
+ // C++ [temp.dep.type]p1:
+ //
+ // In the definition of a class template, a nested class of a
+ // class template, a member of a class template, or a member of a
+ // nested class of a class template, a name refers to the current
+ // instantiation if it is
+ // -- the injected-class-name (9) of the class template or
+ // nested class,
+ // -- in the definition of a primary class template, the name
+ // of the class template followed by the template argument
+ // list of the primary template (as described below)
+ // enclosed in <>,
+ // -- in the definition of a nested class of a class template,
+ // the name of the nested class referenced as a member of
+ // the current instantiation, or
+ // -- in the definition of a partial specialization, the name
+ // of the class template followed by the template argument
+ // list of the partial specialization enclosed in <>. If
+ // the nth template parameter is a parameter pack, the nth
+ // template argument is a pack expansion (14.6.3) whose
+ // pattern is the name of the parameter pack. (FIXME)
+ //
+ // All of these options come down to having the
+ // nested-name-specifier type that is equivalent to the
+ // injected-class-name of one of the types that is currently in
+ // our context.
+ if (Context.getTypeDeclType(Record) == T)
+ return Record;
+
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
+ QualType InjectedClassName
+ = Template->getInjectedClassNameType(Context);
+ if (T == Context.getCanonicalType(InjectedClassName))
+ return Template->getTemplatedDecl();
+ }
+ }
+
+ return 0;
+}
+
+/// \brief Require that the context specified by SS be complete.
+///
+/// If SS refers to a type, this routine checks whether the type is
+/// complete enough (or can be made complete enough) for name lookup
+/// into the DeclContext. A type that is not yet completed can be
+/// considered "complete enough" if it is a class/struct/union/enum
+/// that is currently being defined. Or, if we have a type that names
+/// a class template specialization that is not a complete type, we
+/// will attempt to instantiate that class template.
+bool Sema::RequireCompleteDeclContext(const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return false;
+
+ DeclContext *DC = computeDeclContext(SS);
+ if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ // If we're currently defining this type, then lookup into the
+ // type is okay: don't complain that it isn't complete yet.
+ const TagType *TagT = Context.getTypeDeclType(Tag)->getAsTagType();
+ if (TagT->isBeingDefined())
+ return false;
+
+ // The type must be complete.
+ return RequireCompleteType(SS.getRange().getBegin(),
+ Context.getTypeDeclType(Tag),
+ diag::err_incomplete_nested_name_spec,
+ SS.getRange());
+ }
+
+ return false;
+}
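+
+// Illustrative example (editorial sketch): given only a forward declaration,
+//   struct S;
+//   int n = S::value;   // err_incomplete_nested_name_spec
+// the lookup context S must be completed first, whereas looking into a class
+// that is still being defined (e.g. from within its own member declarations)
+// is accepted by the isBeingDefined() check above.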
+
+/// ActOnCXXGlobalScopeSpecifier - Return the object that represents the
+/// global scope ('::').
+Sema::CXXScopeTy *Sema::ActOnCXXGlobalScopeSpecifier(Scope *S,
+ SourceLocation CCLoc) {
+ return NestedNameSpecifier::GlobalSpecifier(Context);
+}
+
+/// ActOnCXXNestedNameSpecifier - Called during parsing of a
+/// nested-name-specifier. e.g. for "foo::bar::" we parsed "foo::" and now
+/// we want to resolve "bar::". 'SS' is empty or the previously parsed
+/// nested-name part ("foo::"), 'IdLoc' is the source location of 'bar',
+/// 'CCLoc' is the location of '::' and 'II' is the identifier for 'bar'.
+/// Returns a CXXScopeTy* object representing the C++ scope.
+Sema::CXXScopeTy *Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ const CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ SourceLocation CCLoc,
+ IdentifierInfo &II) {
+ NestedNameSpecifier *Prefix
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+
+ // If the prefix already refers to an unknown specialization, there
+ // is no name lookup to perform. Just build the resulting
+ // nested-name-specifier.
+ if (Prefix && isUnknownSpecialization(SS))
+ return NestedNameSpecifier::Create(Context, Prefix, &II);
+
+ NamedDecl *SD = LookupParsedName(S, &SS, &II, LookupNestedNameSpecifierName);
+
+ if (SD) {
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD))
+ return NestedNameSpecifier::Create(Context, Prefix, Namespace);
+
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(SD)) {
+ // Determine whether we have a class (or, in C++0x, an enum) or
+ // a typedef thereof. If so, build the nested-name-specifier.
+ QualType T = Context.getTypeDeclType(Type);
+ bool AcceptableType = false;
+ if (T->isDependentType())
+ AcceptableType = true;
+ else if (TypedefDecl *TD = dyn_cast<TypedefDecl>(SD)) {
+ if (TD->getUnderlyingType()->isRecordType() ||
+ (getLangOptions().CPlusPlus0x &&
+ TD->getUnderlyingType()->isEnumeralType()))
+ AcceptableType = true;
+ } else if (isa<RecordDecl>(Type) ||
+ (getLangOptions().CPlusPlus0x && isa<EnumDecl>(Type)))
+ AcceptableType = true;
+
+ if (AcceptableType)
+ return NestedNameSpecifier::Create(Context, Prefix, false,
+ T.getTypePtr());
+ }
+
+ if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD))
+ return NestedNameSpecifier::Create(Context, Prefix,
+ Alias->getNamespace());
+
+ // Fall through to produce an error: we found something that isn't
+ // a class or a namespace.
+ }
+
+ // If we didn't find anything during our lookup, try again with
+ // ordinary name lookup, which can help us produce better error
+ // messages.
+ if (!SD)
+ SD = LookupParsedName(S, &SS, &II, LookupOrdinaryName);
+ unsigned DiagID;
+ if (SD)
+ DiagID = diag::err_expected_class_or_namespace;
+ else if (SS.isSet())
+ DiagID = diag::err_typecheck_no_member;
+ else
+ DiagID = diag::err_undeclared_var_use;
+
+ if (SS.isSet())
+ Diag(IdLoc, DiagID) << &II << SS.getRange();
+ else
+ Diag(IdLoc, DiagID) << &II;
+
+ return 0;
+}
+
+Sema::CXXScopeTy *Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ const CXXScopeSpec &SS,
+ TypeTy *Ty,
+ SourceRange TypeRange,
+ SourceLocation CCLoc) {
+ NestedNameSpecifier *Prefix
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ QualType T = QualType::getFromOpaquePtr(Ty);
+ return NestedNameSpecifier::Create(Context, Prefix, /*FIXME:*/false,
+ T.getTypePtr());
+}
+
+/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
+/// scope or nested-name-specifier) is parsed, part of a declarator-id.
+/// After this method is called, according to [C++ 3.4.3p3], names should be
+/// looked up in the declarator-id's scope, until the declarator is parsed and
+/// ActOnCXXExitDeclaratorScope is called.
+/// The 'SS' should be a non-empty valid CXXScopeSpec.
+void Sema::ActOnCXXEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+ assert(PreDeclaratorDC == 0 && "Previous declarator context not popped?");
+ PreDeclaratorDC = static_cast<DeclContext*>(S->getEntity());
+ CurContext = computeDeclContext(SS);
+ assert(CurContext && "No context?");
+ S->setEntity(CurContext);
+}
+
+/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
+/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
+/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
+/// Used to indicate that names should revert to being looked up in the
+/// defining scope.
+void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+ assert(S->getEntity() == computeDeclContext(SS) && "Context imbalance!");
+ S->setEntity(PreDeclaratorDC);
+ PreDeclaratorDC = 0;
+
+ // Reset CurContext to the nearest enclosing context.
+ while (!S->getEntity() && S->getParent())
+ S = S->getParent();
+ CurContext = static_cast<DeclContext*>(S->getEntity());
+ assert(CurContext && "No context?");
+}
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
new file mode 100644
index 0000000..4856e7f
--- /dev/null
+++ b/lib/Sema/SemaChecking.cpp
@@ -0,0 +1,1449 @@
+//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements extra semantic analysis beyond what is enforced
+// by the C type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include <limits>
+using namespace clang;
+
+/// getLocationOfStringLiteralByte - Return a source location that points to the
+/// specified byte of the specified string literal.
+///
+/// Strings are amazingly complex. They can be formed from multiple tokens and
+/// can have escape sequences in them in addition to the usual trigraph and
+/// escaped newline business. This routine handles this complexity.
+///
+SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
+ unsigned ByteNo) const {
+ assert(!SL->isWide() && "This doesn't work for wide strings yet");
+
+ // Loop over all of the tokens in this string until we find the one that
+ // contains the byte we're looking for.
+ unsigned TokNo = 0;
+ while (1) {
+ assert(TokNo < SL->getNumConcatenated() && "Invalid byte number!");
+ SourceLocation StrTokLoc = SL->getStrTokenLoc(TokNo);
+
+ // Get the spelling of the string so that we can get the data that makes up
+ // the string literal, not the identifier for the macro it is potentially
+ // expanded through.
+ SourceLocation StrTokSpellingLoc = SourceMgr.getSpellingLoc(StrTokLoc);
+
+ // Re-lex the token to get its length and original spelling.
+ std::pair<FileID, unsigned> LocInfo =
+ SourceMgr.getDecomposedLoc(StrTokSpellingLoc);
+ std::pair<const char *,const char *> Buffer =
+ SourceMgr.getBufferData(LocInfo.first);
+ const char *StrData = Buffer.first+LocInfo.second;
+
+ // Create a langops struct and enable trigraphs. This is sufficient for
+ // relexing tokens.
+ LangOptions LangOpts;
+ LangOpts.Trigraphs = true;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(StrTokSpellingLoc, LangOpts, Buffer.first, StrData,
+ Buffer.second);
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+
+ // Use the StringLiteralParser to compute the length of the string in bytes.
+ StringLiteralParser SLP(&TheTok, 1, PP);
+ unsigned TokNumBytes = SLP.GetStringLength();
+
+ // If the byte is in this token, return the location of the byte.
+ if (ByteNo < TokNumBytes ||
+ (ByteNo == TokNumBytes && TokNo == SL->getNumConcatenated())) {
+ unsigned Offset =
+ StringLiteralParser::getOffsetOfStringByte(TheTok, ByteNo, PP);
+
+ // Now that we know the offset of the token in the spelling, use the
+ // preprocessor to get the offset in the original source.
+ return PP.AdvanceToTokenCharacter(StrTokLoc, Offset);
+ }
+
+ // Move to the next string token.
+ ++TokNo;
+ ByteNo -= TokNumBytes;
+ }
+}
+
+
+/// CheckFunctionCall - Check a direct function call for various correctness
+/// and safety properties not strictly enforced by the C type system.
+Action::OwningExprResult
+Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
+ OwningExprResult TheCallResult(Owned(TheCall));
+ // Get the IdentifierInfo* for the called function.
+ IdentifierInfo *FnInfo = FDecl->getIdentifier();
+
+ // None of the checks below are needed for functions that don't have
+ // simple names (e.g., C++ conversion functions).
+ if (!FnInfo)
+ return move(TheCallResult);
+
+ switch (FDecl->getBuiltinID(Context)) {
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ assert(TheCall->getNumArgs() == 1 &&
+ "Wrong # arguments to builtin CFStringMakeConstantString");
+ if (CheckObjCString(TheCall->getArg(0)))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ if (SemaBuiltinVAStart(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered:
+ if (SemaBuiltinUnorderedCompare(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_return_address:
+ case Builtin::BI__builtin_frame_address:
+ if (SemaBuiltinStackAddress(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_shufflevector:
+ return SemaBuiltinShuffleVector(TheCall);
+ // TheCall will be freed by the smart pointer here, but that's fine, since
+ // SemaBuiltinShuffleVector guts it, but then doesn't release it.
+ case Builtin::BI__builtin_prefetch:
+ if (SemaBuiltinPrefetch(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_object_size:
+ if (SemaBuiltinObjectSize(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__builtin_longjmp:
+ if (SemaBuiltinLongjmp(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ if (SemaBuiltinAtomicOverloaded(TheCall))
+ return ExprError();
+ return move(TheCallResult);
+ }
+
+ // FIXME: This mechanism should be abstracted to be less fragile and
+ // more efficient. For example, just map function ids to custom
+ // handlers.
+
+ // Printf checking.
+ if (const FormatAttr *Format = FDecl->getAttr<FormatAttr>()) {
+ if (Format->getType() == "printf") {
+ bool HasVAListArg = Format->getFirstArg() == 0;
+ if (!HasVAListArg) {
+ if (const FunctionProtoType *Proto
+ = FDecl->getType()->getAsFunctionProtoType())
+ HasVAListArg = !Proto->isVariadic();
+ }
+ CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1,
+ HasVAListArg ? 0 : Format->getFirstArg() - 1);
+ }
+ }
+ for (const Attr *attr = FDecl->getAttrs(); attr; attr = attr->getNext()) {
+ if (const NonNullAttr *NonNull = dyn_cast<NonNullAttr>(attr))
+ CheckNonNullArguments(NonNull, TheCall);
+ }
+
+ return move(TheCallResult);
+}
+
+Action::OwningExprResult
+Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
+
+ OwningExprResult TheCallResult(Owned(TheCall));
+ // Printf checking.
+ const FormatAttr *Format = NDecl->getAttr<FormatAttr>();
+ if (!Format)
+ return move(TheCallResult);
+ const VarDecl *V = dyn_cast<VarDecl>(NDecl);
+ if (!V)
+ return move(TheCallResult);
+ QualType Ty = V->getType();
+ if (!Ty->isBlockPointerType())
+ return move(TheCallResult);
+ if (Format->getType() == "printf") {
+ bool HasVAListArg = Format->getFirstArg() == 0;
+ if (!HasVAListArg) {
+ const FunctionType *FT =
+ Ty->getAsBlockPointerType()->getPointeeType()->getAsFunctionType();
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
+ HasVAListArg = !Proto->isVariadic();
+ }
+ CheckPrintfArguments(TheCall, HasVAListArg, Format->getFormatIdx() - 1,
+ HasVAListArg ? 0 : Format->getFirstArg() - 1);
+ }
+ return move(TheCallResult);
+}
+
+/// SemaBuiltinAtomicOverloaded - We have a call to a function like
+/// __sync_fetch_and_add, which is an overloaded function based on the pointer
+/// type of its first argument. The main ActOnCallExpr routines have already
+/// promoted the types of arguments because all of these calls are prototyped as
+/// void(...).
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
+bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) {
+ DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ // Ensure that we have at least one argument to do type inference from.
+ if (TheCall->getNumArgs() < 1)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << TheCall->getCallee()->getSourceRange();
+
+ // Inspect the first argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ Expr *FirstArg = TheCall->getArg(0);
+ if (!FirstArg->getType()->isPointerType())
+ return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+
+ QualType ValType = FirstArg->getType()->getAsPointerType()->getPointeeType();
+ if (!ValType->isIntegerType() && !ValType->isPointerType() &&
+ !ValType->isBlockPointerType())
+ return Diag(DRE->getLocStart(),
+ diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+
+ // We need to figure out which concrete builtin this maps onto. For example,
+ // __sync_fetch_and_add with a 2 byte object turns into
+ // __sync_fetch_and_add_2.
+#define BUILTIN_ROW(x) \
+ { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
+ Builtin::BI##x##_8, Builtin::BI##x##_16 }
+
+ static const unsigned BuiltinIndices[][5] = {
+ BUILTIN_ROW(__sync_fetch_and_add),
+ BUILTIN_ROW(__sync_fetch_and_sub),
+ BUILTIN_ROW(__sync_fetch_and_or),
+ BUILTIN_ROW(__sync_fetch_and_and),
+ BUILTIN_ROW(__sync_fetch_and_xor),
+ BUILTIN_ROW(__sync_fetch_and_nand),
+
+ BUILTIN_ROW(__sync_add_and_fetch),
+ BUILTIN_ROW(__sync_sub_and_fetch),
+ BUILTIN_ROW(__sync_and_and_fetch),
+ BUILTIN_ROW(__sync_or_and_fetch),
+ BUILTIN_ROW(__sync_xor_and_fetch),
+ BUILTIN_ROW(__sync_nand_and_fetch),
+
+ BUILTIN_ROW(__sync_val_compare_and_swap),
+ BUILTIN_ROW(__sync_bool_compare_and_swap),
+ BUILTIN_ROW(__sync_lock_test_and_set),
+ BUILTIN_ROW(__sync_lock_release)
+ };
+#undef BUILTIN_ROW
+
+ // Determine the index of the size.
+ unsigned SizeIndex;
+ switch (Context.getTypeSize(ValType)/8) {
+ case 1: SizeIndex = 0; break;
+ case 2: SizeIndex = 1; break;
+ case 4: SizeIndex = 2; break;
+ case 8: SizeIndex = 3; break;
+ case 16: SizeIndex = 4; break;
+ default:
+ return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ }
+
+ // Each of these builtins has one pointer argument, followed by some number of
+ // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
+ // that we ignore. Find out which row of BuiltinIndices to read from as well
+ // as the number of fixed args.
+ unsigned BuiltinID = FDecl->getBuiltinID(Context);
+ unsigned BuiltinIndex, NumFixed = 1;
+ switch (BuiltinID) {
+ default: assert(0 && "Unknown overloaded atomic builtin!");
+ case Builtin::BI__sync_fetch_and_add: BuiltinIndex = 0; break;
+ case Builtin::BI__sync_fetch_and_sub: BuiltinIndex = 1; break;
+ case Builtin::BI__sync_fetch_and_or: BuiltinIndex = 2; break;
+ case Builtin::BI__sync_fetch_and_and: BuiltinIndex = 3; break;
+ case Builtin::BI__sync_fetch_and_xor: BuiltinIndex = 4; break;
+ case Builtin::BI__sync_fetch_and_nand:BuiltinIndex = 5; break;
+
+ case Builtin::BI__sync_add_and_fetch: BuiltinIndex = 6; break;
+ case Builtin::BI__sync_sub_and_fetch: BuiltinIndex = 7; break;
+ case Builtin::BI__sync_and_and_fetch: BuiltinIndex = 8; break;
+ case Builtin::BI__sync_or_and_fetch: BuiltinIndex = 9; break;
+ case Builtin::BI__sync_xor_and_fetch: BuiltinIndex =10; break;
+ case Builtin::BI__sync_nand_and_fetch:BuiltinIndex =11; break;
+
+ case Builtin::BI__sync_val_compare_and_swap:
+ BuiltinIndex = 12;
+ NumFixed = 2;
+ break;
+ case Builtin::BI__sync_bool_compare_and_swap:
+ BuiltinIndex = 13;
+ NumFixed = 2;
+ break;
+ case Builtin::BI__sync_lock_test_and_set: BuiltinIndex = 14; break;
+ case Builtin::BI__sync_lock_release:
+ BuiltinIndex = 15;
+ NumFixed = 0;
+ break;
+ }
+
+ // Now that we know how many fixed arguments we expect, first check that we
+ // have at least that many.
+ if (TheCall->getNumArgs() < 1+NumFixed)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << TheCall->getCallee()->getSourceRange();
+
+
+ // Get the decl for the concrete builtin from this, we can tell what the
+ // concrete integer type we should convert to is.
+ unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
+ const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
+ IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
+ FunctionDecl *NewBuiltinDecl =
+ cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
+ TUScope, false, DRE->getLocStart()));
+ const FunctionProtoType *BuiltinFT =
+ NewBuiltinDecl->getType()->getAsFunctionProtoType();
+ ValType = BuiltinFT->getArgType(0)->getAsPointerType()->getPointeeType();
+
+ // If the first type needs to be converted (e.g. void** -> int*), do it now.
+ if (BuiltinFT->getArgType(0) != FirstArg->getType()) {
+ ImpCastExprToType(FirstArg, BuiltinFT->getArgType(0), false);
+ TheCall->setArg(0, FirstArg);
+ }
+
+ // Next, walk the valid ones promoting to the right type.
+ for (unsigned i = 0; i != NumFixed; ++i) {
+ Expr *Arg = TheCall->getArg(i+1);
+
+ // If the argument is an implicit cast, then there was a promotion due to
+ // "...", just remove it now.
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ Arg = ICE->getSubExpr();
+ ICE->setSubExpr(0);
+ ICE->Destroy(Context);
+ TheCall->setArg(i+1, Arg);
+ }
+
+ // GCC does an implicit conversion to the pointer or integer ValType. This
+ // can fail in some cases (1i -> int**), check for this error case now.
+ if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg))
+ return true;
+
+ // Okay, we have something that *can* be converted to the right type. Check
+ // to see if there is a potentially weird extension going on here. This can
+ // happen when you do an atomic operation on something like a char* and
+ // pass in 42. The 42 gets converted to char. This is even more strange
+ // for things like 45.123 -> char, etc.
+ // FIXME: Do this check.
+ ImpCastExprToType(Arg, ValType, false);
+ TheCall->setArg(i+1, Arg);
+ }
+
+ // Switch the DeclRefExpr to refer to the new decl.
+ DRE->setDecl(NewBuiltinDecl);
+ DRE->setType(NewBuiltinDecl->getType());
+
+ // Set the callee in the CallExpr.
+ // FIXME: This leaks the original parens and implicit casts.
+ Expr *PromotedCall = DRE;
+ UsualUnaryConversions(PromotedCall);
+ TheCall->setCallee(PromotedCall);
+
+
+ // Change the result type of the call to match the result type of the decl.
+ TheCall->setType(NewBuiltinDecl->getResultType());
+ return false;
+}
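+
+// Illustrative example (editorial sketch, assuming a target where short is
+// 2 bytes):
+//   short Counter;
+//   __sync_fetch_and_add(&Counter, 1);
+// resolves to the concrete builtin __sync_fetch_and_add_2, and the value
+// operand is implicitly converted to the pointee type (short) as done above.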
+
+
+/// CheckObjCString - Checks that the argument to the builtin
+/// CFString constructor is correct
+/// FIXME: GCC currently emits the following warning:
+/// "warning: input conversion stopped due to an input byte that does not
+/// belong to the input codeset UTF-8"
+/// Note: It might also make sense to do the UTF-16 conversion here (would
+/// simplify the backend).
+bool Sema::CheckObjCString(Expr *Arg) {
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
+
+ if (!Literal || Literal->isWide()) {
+ Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << Arg->getSourceRange();
+ return true;
+ }
+
+ const char *Data = Literal->getStrData();
+ unsigned Length = Literal->getByteLength();
+
+ for (unsigned i = 0; i < Length; ++i) {
+ if (!Data[i]) {
+ Diag(getLocationOfStringLiteralByte(Literal, i),
+ diag::warn_cfstring_literal_contains_nul_character)
+ << Arg->getSourceRange();
+ break;
+ }
+ }
+
+ return false;
+}
+
+/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity.
+/// Emit an error and return true on failure, return false on success.
+bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
+ Expr *Fn = TheCall->getCallee();
+ if (TheCall->getNumArgs() > 2) {
+ Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << Fn->getSourceRange()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+ return true;
+ }
+
+ if (TheCall->getNumArgs() < 2) {
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/;
+ }
+
+ // Determine whether the current function is variadic or not.
+ bool isVariadic;
+ if (CurBlock)
+ isVariadic = CurBlock->isVariadic;
+ else if (getCurFunctionDecl()) {
+ if (FunctionProtoType* FTP =
+ dyn_cast<FunctionProtoType>(getCurFunctionDecl()->getType()))
+ isVariadic = FTP->isVariadic();
+ else
+ isVariadic = false;
+ } else {
+ isVariadic = getCurMethodDecl()->isVariadic();
+ }
+
+ if (!isVariadic) {
+ Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ return true;
+ }
+
+ // Verify that the second argument to the builtin is the last argument of the
+ // current function or method.
+ bool SecondArgIsLastNamedArgument = false;
+ const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
+ // FIXME: This isn't correct for methods (results in bogus warning).
+ // Get the last formal in the current function.
+ const ParmVarDecl *LastArg;
+ if (CurBlock)
+ LastArg = *(CurBlock->TheDecl->param_end()-1);
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ LastArg = *(FD->param_end()-1);
+ else
+ LastArg = *(getCurMethodDecl()->param_end()-1);
+ SecondArgIsLastNamedArgument = PV == LastArg;
+ }
+ }
+
+ if (!SecondArgIsLastNamedArgument)
+ Diag(TheCall->getArg(1)->getLocStart(),
+ diag::warn_second_parameter_of_va_start_not_last_named_argument);
+ return false;
+}
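+
+// Illustrative example (editorial sketch): in
+//   void log_msg(const char *fmt, ...) {
+//     va_list ap;
+//     __builtin_va_start(ap, fmt);
+//   }
+// 'fmt' is the last named parameter, so no warning fires; naming any other
+// expression as the second argument triggers the warning emitted above.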
+
+/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
+/// friends. This is declared to take (...), so we have to check everything.
+bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 2)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/;
+ if (TheCall->getNumArgs() > 2)
+ return Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+
+ Expr *OrigArg0 = TheCall->getArg(0);
+ Expr *OrigArg1 = TheCall->getArg(1);
+
+ // Do standard promotions between the two arguments, returning their common
+ // type.
+ QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
+
+ // Make sure any conversions are pushed back into the call; this is
+ // type safe since unordered compare builtins are declared as "_Bool
+ // foo(...)".
+ TheCall->setArg(0, OrigArg0);
+ TheCall->setArg(1, OrigArg1);
+
+ if (OrigArg0->isTypeDependent() || OrigArg1->isTypeDependent())
+ return false;
+
+ // If the common type isn't a real floating type, then the arguments were
+ // invalid for this operation.
+ if (!Res->isRealFloatingType())
+ return Diag(OrigArg0->getLocStart(),
+ diag::err_typecheck_call_invalid_ordered_compare)
+ << OrigArg0->getType() << OrigArg1->getType()
+ << SourceRange(OrigArg0->getLocStart(), OrigArg1->getLocEnd());
+
+ return false;
+}
+
+bool Sema::SemaBuiltinStackAddress(CallExpr *TheCall) {
+ // The signature for these builtins is exact; the only thing we need
+ // to check is that the argument is a constant.
+ SourceLocation Loc;
+ if (!TheCall->getArg(0)->isTypeDependent() &&
+ !TheCall->getArg(0)->isValueDependent() &&
+ !TheCall->getArg(0)->isIntegerConstantExpr(Context, &Loc))
+ return Diag(Loc, diag::err_stack_const_level) << TheCall->getSourceRange();
+
+ return false;
+}
+
+/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
+// This is declared to take (...), so we have to check everything.
+Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 3)
+ return ExprError(Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << TheCall->getSourceRange());
+
+ unsigned numElements = std::numeric_limits<unsigned>::max();
+ if (!TheCall->getArg(0)->isTypeDependent() &&
+ !TheCall->getArg(1)->isTypeDependent()) {
+ QualType FAType = TheCall->getArg(0)->getType();
+ QualType SAType = TheCall->getArg(1)->getType();
+
+ if (!FAType->isVectorType() || !SAType->isVectorType()) {
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ return ExprError();
+ }
+
+ if (Context.getCanonicalType(FAType).getUnqualifiedType() !=
+ Context.getCanonicalType(SAType).getUnqualifiedType()) {
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ return ExprError();
+ }
+
+ numElements = FAType->getAsVectorType()->getNumElements();
+ if (TheCall->getNumArgs() != numElements+2) {
+ if (TheCall->getNumArgs() < numElements+2)
+ return ExprError(Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << TheCall->getSourceRange());
+ return ExprError(Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << TheCall->getSourceRange());
+ }
+ }
+
+ for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
+ if (TheCall->getArg(i)->isTypeDependent() ||
+ TheCall->getArg(i)->isValueDependent())
+ continue;
+
+ llvm::APSInt Result(32);
+ if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_nonconstant_argument)
+ << TheCall->getArg(i)->getSourceRange());
+
+ if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_argument_too_large)
+ << TheCall->getArg(i)->getSourceRange());
+ }
+
+ llvm::SmallVector<Expr*, 32> exprs;
+
+ for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
+ exprs.push_back(TheCall->getArg(i));
+ TheCall->setArg(i, 0);
+ }
+
+ return Owned(new (Context) ShuffleVectorExpr(exprs.begin(), exprs.size(),
+ exprs[0]->getType(),
+ TheCall->getCallee()->getLocStart(),
+ TheCall->getRParenLoc()));
+}
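+
+// Illustrative example (editorial sketch): for a 4-element vector type,
+//   typedef int v4si __attribute__((vector_size(16)));
+//   v4si a, b;
+//   v4si c = __builtin_shufflevector(a, b, 0, 4, 1, 5);
+// exactly numElements (4) index arguments must follow the two vectors, and
+// each index must be a constant smaller than numElements*2 (8 here).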
+
+/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
+// This is declared to take (const void*, ...) and can take two
+// optional constant int args.
+bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs > 3)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << TheCall->getSourceRange();
+
+ // Argument 0 is checked for us and the remaining arguments must be
+ // constant integers.
+ for (unsigned i = 1; i != NumArgs; ++i) {
+ Expr *Arg = TheCall->getArg(i);
+ if (Arg->isTypeDependent())
+ continue;
+
+ QualType RWType = Arg->getType();
+
+ const BuiltinType *BT = RWType->getAsBuiltinType();
+ llvm::APSInt Result;
+ if (!BT || BT->getKind() != BuiltinType::Int)
+ return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_argument)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ if (Arg->isValueDependent())
+ continue;
+
+ if (!Arg->isIntegerConstantExpr(Result, Context))
+ return Diag(TheCall->getLocStart(), diag::err_prefetch_invalid_argument)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ // FIXME: gcc issues a warning and rewrites these to 0. This
+ // seems especially odd for the third argument, since the default
+ // is 3.
+ if (i == 1) {
+ if (Result.getSExtValue() < 0 || Result.getSExtValue() > 1)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "1" << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ } else {
+ if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ }
+ }
+
+ return false;
+}
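+
+// Illustrative example (editorial sketch): __builtin_prefetch(p, 1, 3) is
+// accepted (read/write flag in [0,1], locality in [0,3]), while
+// __builtin_prefetch(p, 2) is rejected with err_argument_invalid_range.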
+
+/// SemaBuiltinObjectSize - Handle __builtin_object_size(void *ptr,
+/// int type). This simply type checks that type is one of the defined
+/// constants (0-3).
+bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(1);
+ if (Arg->isTypeDependent())
+ return false;
+
+ QualType ArgType = Arg->getType();
+ const BuiltinType *BT = ArgType->getAsBuiltinType();
+ llvm::APSInt Result(32);
+ if (!BT || BT->getKind() != BuiltinType::Int)
+ return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ if (Arg->isValueDependent())
+ return false;
+
+ if (!Arg->isIntegerConstantExpr(Result, Context)) {
+ return Diag(TheCall->getLocStart(), diag::err_object_size_invalid_argument)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ }
+
+ if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3) {
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ }
+
+ return false;
+}
+
+/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
+/// This checks that val is a constant 1.
+bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(1);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ llvm::APSInt Result(32);
+ if (!Arg->isIntegerConstantExpr(Result, Context) || Result != 1)
+ return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ return false;
+}
+
+// Handle i > 1 ? "x" : "y", recursively.
+bool Sema::SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall,
+ bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg) {
+ if (E->isTypeDependent() || E->isValueDependent())
+ return false;
+
+ switch (E->getStmtClass()) {
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *C = cast<ConditionalOperator>(E);
+ return SemaCheckStringLiteral(C->getLHS(), TheCall,
+ HasVAListArg, format_idx, firstDataArg)
+ && SemaCheckStringLiteral(C->getRHS(), TheCall,
+ HasVAListArg, format_idx, firstDataArg);
+ }
+
+ case Stmt::ImplicitCastExprClass: {
+ const ImplicitCastExpr *Expr = cast<ImplicitCastExpr>(E);
+ return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
+ format_idx, firstDataArg);
+ }
+
+ case Stmt::ParenExprClass: {
+ const ParenExpr *Expr = cast<ParenExpr>(E);
+ return SemaCheckStringLiteral(Expr->getSubExpr(), TheCall, HasVAListArg,
+ format_idx, firstDataArg);
+ }
+
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ // As an exception, do not flag errors for variables binding to
+ // const string literals.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ bool isConstant = false;
+ QualType T = DR->getType();
+
+ if (const ArrayType *AT = Context.getAsArrayType(T)) {
+ isConstant = AT->getElementType().isConstant(Context);
+ }
+ else if (const PointerType *PT = T->getAsPointerType()) {
+ isConstant = T.isConstant(Context) &&
+ PT->getPointeeType().isConstant(Context);
+ }
+
+ if (isConstant) {
+ const VarDecl *Def = 0;
+ if (const Expr *Init = VD->getDefinition(Def))
+ return SemaCheckStringLiteral(Init, TheCall,
+ HasVAListArg, format_idx, firstDataArg);
+ }
+ }
+
+ return false;
+ }
+
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *StrE = NULL;
+
+ if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
+ StrE = ObjCFExpr->getString();
+ else
+ StrE = cast<StringLiteral>(E);
+
+ if (StrE) {
+ CheckPrintfString(StrE, E, TheCall, HasVAListArg, format_idx,
+ firstDataArg);
+ return true;
+ }
+
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+void
+Sema::CheckNonNullArguments(const NonNullAttr *NonNull, const CallExpr *TheCall)
+{
+ for (NonNullAttr::iterator i = NonNull->begin(), e = NonNull->end();
+ i != e; ++i) {
+ const Expr *ArgExpr = TheCall->getArg(*i);
+ if (ArgExpr->isNullPointerConstant(Context))
+ Diag(TheCall->getCallee()->getLocStart(), diag::warn_null_arg)
+ << ArgExpr->getSourceRange();
+ }
+}
+
+/// CheckPrintfArguments - Check calls to printf (and similar functions) for
+/// correct use of format strings.
+///
+/// HasVAListArg - A predicate indicating whether the printf-like
+/// function is passed an explicit va_arg argument (e.g., vprintf)
+///
+/// format_idx - The index into Args for the format string.
+///
+/// Improper format strings to functions in the printf family can be
+/// the source of bizarre bugs and very serious security holes. A
+/// good source of information is available in the following paper
+/// (which includes additional references):
+///
+/// FormatGuard: Automatic Protection From printf Format String
+/// Vulnerabilities, Proceedings of the 10th USENIX Security Symposium, 2001.
+///
+/// Functionality implemented:
+///
+/// We can statically check the following properties for string
+/// literal format strings for non v.*printf functions (where the
+/// arguments are passed directly):
+///
+/// (1) Is the number of format conversions equal to the number of
+/// data arguments?
+///
+/// (2) Does each format conversion correctly match the type of the
+/// corresponding data argument? (TODO)
+///
+/// Moreover, for all printf functions we can:
+///
+/// (3) Check for a missing format string (when not caught by type checking).
+///
+/// (4) Check for no-operation flags; e.g. using "#" with format
+/// conversion 'c' (TODO)
+///
+/// (5) Check the use of '%n', a major source of security holes.
+///
+/// (6) Check for malformed format conversions that don't specify anything.
+///
+/// (7) Check for empty format strings. e.g: printf("");
+///
+/// (8) Check whether the format string is a wide literal.
+///
+/// (9) Also check the arguments of functions with the __format__ attribute.
+/// (TODO).
+///
+/// All of these checks can be done by parsing the format string.
+///
+/// For now, we ONLY do (1), (3), (5), (6), (7), and (8).
+void
+Sema::CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg) {
+ const Expr *Fn = TheCall->getCallee();
+
+ // CHECK: printf-like function is called with no format string.
+ if (format_idx >= TheCall->getNumArgs()) {
+ Diag(TheCall->getRParenLoc(), diag::warn_printf_missing_format_string)
+ << Fn->getSourceRange();
+ return;
+ }
+
+ const Expr *OrigFormatExpr = TheCall->getArg(format_idx)->IgnoreParenCasts();
+
+ // CHECK: format string is not a string literal.
+ //
+ // Dynamically generated format strings are difficult to
+ // automatically vet at compile time. Requiring that format strings
+ // are string literals: (1) permits the checking of format strings by
+ // the compiler and thereby (2) can practically remove the source of
+ // many format string exploits.
+
+ // Format string can be either ObjC string (e.g. @"%d") or
+ // C string (e.g. "%d")
+ // ObjC string uses the same format specifiers as C string, so we can use
+ // the same format string checking logic for both ObjC and C strings.
+ if (SemaCheckStringLiteral(OrigFormatExpr, TheCall, HasVAListArg, format_idx,
+ firstDataArg))
+ return; // Literal format string found, check done!
+
+ // For vprintf* functions (i.e., HasVAListArg==true), we add a
+ // special check to see if the format string is a function parameter
+ // of the function calling the printf function. If the function
+ // has an attribute indicating it is a printf-like function, then we
+ // should suppress warnings concerning non-literals being used in a call
+ // to a vprintf function. For example:
+ //
+ // void
+ // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...) {
+ // va_list ap;
+ // va_start(ap, fmt);
+ // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
+ // ...
+ //
+ //
+ // FIXME: We don't have full attribute support yet, so just check to see
+ // if the argument is a DeclRefExpr that references a parameter. We'll
+ // add proper support for checking the attribute later.
+ if (HasVAListArg)
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(OrigFormatExpr))
+ if (isa<ParmVarDecl>(DR->getDecl()))
+ return;
+
+ // If there are no arguments specified, warn with -Wformat-security, otherwise
+ // warn only with -Wformat-nonliteral.
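+ //
+ // For example (illustrative only; 'fmt' and 'x' are hypothetical):
+ //
+ //   printf(fmt);      // warned under -Wformat-security
+ //   printf(fmt, x);   // warned under -Wformat-nonliteral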
+ if (TheCall->getNumArgs() == format_idx+1)
+ Diag(TheCall->getArg(format_idx)->getLocStart(),
+ diag::warn_printf_nonliteral_noargs)
+ << OrigFormatExpr->getSourceRange();
+ else
+ Diag(TheCall->getArg(format_idx)->getLocStart(),
+ diag::warn_printf_nonliteral)
+ << OrigFormatExpr->getSourceRange();
+}
+
+void Sema::CheckPrintfString(const StringLiteral *FExpr,
+ const Expr *OrigFormatExpr,
+ const CallExpr *TheCall, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg) {
+
+ const ObjCStringLiteral *ObjCFExpr =
+ dyn_cast<ObjCStringLiteral>(OrigFormatExpr);
+
+ // CHECK: is the format string a wide literal?
+ if (FExpr->isWide()) {
+ Diag(FExpr->getLocStart(),
+ diag::warn_printf_format_string_is_wide_literal)
+ << OrigFormatExpr->getSourceRange();
+ return;
+ }
+
+ // Str - The format string. NOTE: this is NOT null-terminated!
+ const char *Str = FExpr->getStrData();
+
+ // CHECK: empty format string?
+ unsigned StrLen = FExpr->getByteLength();
+
+ if (StrLen == 0) {
+ Diag(FExpr->getLocStart(), diag::warn_printf_empty_format_string)
+ << OrigFormatExpr->getSourceRange();
+ return;
+ }
+
+ // We process the format string using a binary state machine. The
+ // current state is stored in CurrentState.
+ enum {
+ state_OrdChr,
+ state_Conversion
+ } CurrentState = state_OrdChr;
+
+ // numConversions - The number of conversions seen so far. This is
+ // incremented as we traverse the format string.
+ unsigned numConversions = 0;
+
+ // numDataArgs - The number of data arguments after the format
+ // string. This can only be determined for non-vprintf-like
+ // functions; for vprintf-like functions, this value is 1 (the sole
+ // va_list argument).
+ unsigned numDataArgs = TheCall->getNumArgs()-firstDataArg;
+
+ // Inspect the format string.
+ unsigned StrIdx = 0;
+
+ // LastConversionIdx - Index within the format string where we last saw
+ // a '%' character that starts a new format conversion.
+ unsigned LastConversionIdx = 0;
+
+ for (; StrIdx < StrLen; ++StrIdx) {
+
+ // Is the number of detected format conversions greater than
+ // the number of matching data arguments? If so, stop.
+ if (!HasVAListArg && numConversions > numDataArgs) break;
+
+ // Handle "\0"
+ if (Str[StrIdx] == '\0') {
+ // The string returned by getStrData() is not null-terminated,
+ // so the presence of a null character is likely an error.
+ Diag(getLocationOfStringLiteralByte(FExpr, StrIdx),
+ diag::warn_printf_format_string_contains_null_char)
+ << OrigFormatExpr->getSourceRange();
+ return;
+ }
+
+ // Ordinary characters (not processing a format conversion).
+ if (CurrentState == state_OrdChr) {
+ if (Str[StrIdx] == '%') {
+ CurrentState = state_Conversion;
+ LastConversionIdx = StrIdx;
+ }
+ continue;
+ }
+
+ // Seen '%'. Now processing a format conversion.
+ switch (Str[StrIdx]) {
+ // Handle dynamic precision or width specifier.
+ case '*': {
+ ++numConversions;
+
+ if (!HasVAListArg) {
+ if (numConversions > numDataArgs) {
+ SourceLocation Loc = getLocationOfStringLiteralByte(FExpr, StrIdx);
+
+ if (Str[StrIdx-1] == '.')
+ Diag(Loc, diag::warn_printf_asterisk_precision_missing_arg)
+ << OrigFormatExpr->getSourceRange();
+ else
+ Diag(Loc, diag::warn_printf_asterisk_width_missing_arg)
+ << OrigFormatExpr->getSourceRange();
+
+ // Don't do any more checking. We'll just emit spurious errors.
+ return;
+ }
+
+ // Perform type checking on width/precision specifier.
+ const Expr *E = TheCall->getArg(format_idx+numConversions);
+ if (const BuiltinType *BT = E->getType()->getAsBuiltinType())
+ if (BT->getKind() == BuiltinType::Int)
+ break;
+
+ SourceLocation Loc = getLocationOfStringLiteralByte(FExpr, StrIdx);
+
+ if (Str[StrIdx-1] == '.')
+ Diag(Loc, diag::warn_printf_asterisk_precision_wrong_type)
+ << E->getType() << E->getSourceRange();
+ else
+ Diag(Loc, diag::warn_printf_asterisk_width_wrong_type)
+ << E->getType() << E->getSourceRange();
+
+ break;
+ }
+ }
+
+ // Characters which can terminate a format conversion
+ // (e.g. "%d"). Characters that specify length modifiers or
+ // other flags are handled by the default case below.
+ //
+ // FIXME: additional checks will go into the following cases.
+ case 'i':
+ case 'd':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ case 'D':
+ case 'O':
+ case 'U':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ case 'a':
+ case 'A':
+ case 'c':
+ case 'C':
+ case 'S':
+ case 's':
+ case 'p':
+ ++numConversions;
+ CurrentState = state_OrdChr;
+ break;
+
+ case 'm':
+ // FIXME: Warn in situations where this isn't supported!
+ CurrentState = state_OrdChr;
+ break;
+
+ // CHECK: Are we using "%n"? Issue a warning.
+ case 'n': {
+ ++numConversions;
+ CurrentState = state_OrdChr;
+ SourceLocation Loc = getLocationOfStringLiteralByte(FExpr,
+ LastConversionIdx);
+
+ Diag(Loc, diag::warn_printf_write_back) << OrigFormatExpr->getSourceRange();
+ break;
+ }
+
+ // Handle "%@"
+ case '@':
+ // %@ is allowed in ObjC format strings only.
+ if (ObjCFExpr != NULL)
+ CurrentState = state_OrdChr;
+ else {
+ // Issue a warning: invalid format conversion.
+ SourceLocation Loc =
+ getLocationOfStringLiteralByte(FExpr, LastConversionIdx);
+
+ Diag(Loc, diag::warn_printf_invalid_conversion)
+ << std::string(Str+LastConversionIdx,
+ Str+std::min(LastConversionIdx+2, StrLen))
+ << OrigFormatExpr->getSourceRange();
+ }
+ ++numConversions;
+ break;
+
+ // Handle "%%"
+ case '%':
+ // Sanity check: Was the first "%" character the previous one?
+ // If not, we will assume that we have a malformed format
+ // conversion, and that the current "%" character is the start
+ // of a new conversion.
+ if (StrIdx - LastConversionIdx == 1)
+ CurrentState = state_OrdChr;
+ else {
+ // Issue a warning: invalid format conversion.
+ SourceLocation Loc =
+ getLocationOfStringLiteralByte(FExpr, LastConversionIdx);
+
+ Diag(Loc, diag::warn_printf_invalid_conversion)
+ << std::string(Str+LastConversionIdx, Str+StrIdx)
+ << OrigFormatExpr->getSourceRange();
+
+ // This conversion is broken. Advance to the next format
+ // conversion.
+ LastConversionIdx = StrIdx;
+ ++numConversions;
+ }
+ break;
+
+ default:
+ // This case catches all other characters: flags, widths, etc.
+ // We should eventually process those as well.
+ break;
+ }
+ }
+
+ if (CurrentState == state_Conversion) {
+ // Issue a warning: invalid format conversion.
+ SourceLocation Loc =
+ getLocationOfStringLiteralByte(FExpr, LastConversionIdx);
+
+ Diag(Loc, diag::warn_printf_invalid_conversion)
+ << std::string(Str+LastConversionIdx,
+ Str+std::min(LastConversionIdx+2, StrLen))
+ << OrigFormatExpr->getSourceRange();
+ return;
+ }
+
+ if (!HasVAListArg) {
+ // CHECK: Does the number of format conversions exceed the number
+ // of data arguments?
+ if (numConversions > numDataArgs) {
+ SourceLocation Loc =
+ getLocationOfStringLiteralByte(FExpr, LastConversionIdx);
+
+ Diag(Loc, diag::warn_printf_insufficient_data_args)
+ << OrigFormatExpr->getSourceRange();
+ }
+ // CHECK: Does the number of data arguments exceed the number of
+ // format conversions in the format string?
+ else if (numConversions < numDataArgs)
+ Diag(TheCall->getArg(format_idx+numConversions+1)->getLocStart(),
+ diag::warn_printf_too_many_data_args)
+ << OrigFormatExpr->getSourceRange();
+ }
+}
+
+//===--- CHECK: Return Address of Stack Variable --------------------------===//
+
+static DeclRefExpr* EvalVal(Expr *E);
+static DeclRefExpr* EvalAddr(Expr* E);
+
+/// CheckReturnStackAddr - Check if a return statement returns the address
+/// of a stack variable.
+void
+Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc) {
+
+ // Perform checking for returned stack addresses.
+ if (lhsType->isPointerType() || lhsType->isBlockPointerType()) {
+ if (DeclRefExpr *DR = EvalAddr(RetValExp))
+ Diag(DR->getLocStart(), diag::warn_ret_stack_addr)
+ << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
+
+ // Skip over implicit cast expressions when checking for block expressions.
+ if (ImplicitCastExpr *IcExpr =
+ dyn_cast_or_null<ImplicitCastExpr>(RetValExp))
+ RetValExp = IcExpr->getSubExpr();
+
+ if (BlockExpr *C = dyn_cast_or_null<BlockExpr>(RetValExp))
+ if (C->hasBlockDeclRefExprs())
+ Diag(C->getLocStart(), diag::err_ret_local_block)
+ << C->getSourceRange();
+ }
+ // Perform checking for stack values returned by reference.
+ else if (lhsType->isReferenceType()) {
+ // Check for a reference to the stack
+ if (DeclRefExpr *DR = EvalVal(RetValExp))
+ Diag(DR->getLocStart(), diag::warn_ret_stack_ref)
+ << DR->getDecl()->getDeclName() << RetValExp->getSourceRange();
+ }
+}
+
+/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
+/// check if the expression in a return statement evaluates to an address
+/// to a location on the stack. The recursion is used to traverse the
+/// AST of the return expression, with recursion backtracking when we
+/// encounter a subexpression that (1) clearly does not lead to the address
+/// of a stack variable or (2) is something we cannot determine leads to
+/// the address of a stack variable based on such local checking.
+///
+/// EvalAddr processes expressions that are pointers that are used as
+/// references (and not L-values). EvalVal handles all other values.
+/// At the base of the recursion is a check for a DeclRefExpr* that
+/// refers to a stack variable.
+///
+/// This implementation handles:
+///
+/// * pointer-to-pointer casts
+/// * implicit conversions from array references to pointers
+/// * taking the address of fields
+/// * arbitrary interplay between "&" and "*" operators
+/// * pointer arithmetic from an address of a stack variable
+/// * taking the address of an array element where the array is on the stack
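+///
+/// For example (an illustrative snippet, not taken from the code below):
+///
+///   int *f() {
+///     int buf[16];
+///     return buf + 4;  // pointer arithmetic on a stack array is flagged
+///   }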
+static DeclRefExpr* EvalAddr(Expr *E) {
+ // We should only be called for evaluating pointer expressions.
+ assert((E->getType()->isPointerType() ||
+ E->getType()->isBlockPointerType() ||
+ E->getType()->isObjCQualifiedIdType()) &&
+ "EvalAddr only works on pointers");
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+ switch (E->getStmtClass()) {
+ case Stmt::ParenExprClass:
+ // Ignore parentheses.
+ return EvalAddr(cast<ParenExpr>(E)->getSubExpr());
+
+ case Stmt::UnaryOperatorClass: {
+ // The only unary operator that makes sense to handle here
+ // is AddrOf. All others don't make sense as pointers.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UnaryOperator::AddrOf)
+ return EvalVal(U->getSubExpr());
+ else
+ return NULL;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ // Handle pointer arithmetic. All other binary operators are not valid
+ // in this context.
+ BinaryOperator *B = cast<BinaryOperator>(E);
+ BinaryOperator::Opcode op = B->getOpcode();
+
+ if (op != BinaryOperator::Add && op != BinaryOperator::Sub)
+ return NULL;
+
+ Expr *Base = B->getLHS();
+
+ // Determine which argument is the real pointer base. It could be
+ // the RHS argument instead of the LHS.
+ if (!Base->getType()->isPointerType()) Base = B->getRHS();
+
+ assert (Base->getType()->isPointerType());
+ return EvalAddr(Base);
+ }
+
+ // For conditional operators we need to see if either the LHS or RHS are
+ // valid DeclRefExpr*s. If one of them is valid, we return it.
+ case Stmt::ConditionalOperatorClass: {
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ if (Expr *lhsExpr = C->getLHS())
+ if (DeclRefExpr* LHS = EvalAddr(lhsExpr))
+ return LHS;
+
+ return EvalAddr(C->getRHS());
+ }
+
+ // For casts, we need to handle conversions from arrays to
+ // pointer values, and pointer-to-pointer conversions.
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass: {
+ Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
+ QualType T = SubExpr->getType();
+
+ if (SubExpr->getType()->isPointerType() ||
+ SubExpr->getType()->isBlockPointerType() ||
+ SubExpr->getType()->isObjCQualifiedIdType())
+ return EvalAddr(SubExpr);
+ else if (T->isArrayType())
+ return EvalVal(SubExpr);
+ else
+ return 0;
+ }
+
+ // C++ casts. For dynamic casts, static casts, and const casts, we
+ // are always converting from a pointer-to-pointer, so we just blow
+ // through the cast. In the case the dynamic cast doesn't fail (and
+ // return NULL), we take the conservative route and report cases
+ // where we return the address of a stack variable. Reinterpret casts
+ // are treated the same way.
+ // FIXME: The comment above is wrong; we're not always converting
+ // from pointer to pointer. I'm guessing that this code should also
+ // handle references to objects.
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass: {
+ Expr *S = cast<CXXNamedCastExpr>(E)->getSubExpr();
+ if (S->getType()->isPointerType() || S->getType()->isBlockPointerType())
+ return EvalAddr(S);
+ else
+ return NULL;
+ }
+
+ // Everything else: we simply don't reason about them.
+ default:
+ return NULL;
+ }
+}
+
+
+/// EvalVal - This function complements EvalAddr in the mutual recursion.
+/// See the comments for EvalAddr for more details.
+static DeclRefExpr* EvalVal(Expr *E) {
+
+ // We should only be called for evaluating non-pointer expressions, or
+ // expressions with a pointer type that are not used as references but instead
+ // are l-values (e.g., DeclRefExpr with a pointer type).
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ case Stmt::QualifiedDeclRefExprClass: {
+ // DeclRefExpr: the base case. When we hit a DeclRefExpr we are looking
+ // at code that refers to a variable's name. We check if it has local
+ // storage within the function, and if so, return the expression.
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ if (V->hasLocalStorage() && !V->getType()->isReferenceType()) return DR;
+
+ return NULL;
+ }
+
+ case Stmt::ParenExprClass:
+ // Ignore parentheses.
+ return EvalVal(cast<ParenExpr>(E)->getSubExpr());
+
+ case Stmt::UnaryOperatorClass: {
+ // The only unary operator that makes sense to handle here
+ // is Deref. All others don't resolve to a "name." This includes
+ // handling all sorts of rvalues passed to a unary operator.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UnaryOperator::Deref)
+ return EvalAddr(U->getSubExpr());
+
+ return NULL;
+ }
+
+ case Stmt::ArraySubscriptExprClass: {
+ // Array subscripts are potential references to data on the stack. We
+ // retrieve the DeclRefExpr* for the array variable if it indeed
+ // has local storage.
+ return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase());
+ }
+
+ case Stmt::ConditionalOperatorClass: {
+ // For conditional operators we need to see if either the LHS or RHS are
+ // non-NULL DeclRefExpr's. If one is non-NULL, we return it.
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ if (Expr *lhsExpr = C->getLHS())
+ if (DeclRefExpr *LHS = EvalVal(lhsExpr))
+ return LHS;
+
+ return EvalVal(C->getRHS());
+ }
+
+ // Accesses to members are potential references to data on the stack.
+ case Stmt::MemberExprClass: {
+ MemberExpr *M = cast<MemberExpr>(E);
+
+ // Check for indirect access. We only want direct field accesses.
+ if (!M->isArrow())
+ return EvalVal(M->getBase());
+ else
+ return NULL;
+ }
+
+ // Everything else: we simply don't reason about them.
+ default:
+ return NULL;
+ }
+}
+
+//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
+
+/// Check for comparisons of floating point operands using != and ==.
+/// Issue a warning if these are not self-comparisons, as they are not likely
+/// to do what the programmer intended.
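+///
+/// For example (illustrative only; 'x' is a hypothetical double):
+///
+///   if (x == 0.1) ...  // warned: 0.1 has no exact representation
+///   if (x == 2.0) ...  // not warned: the literal is exactly representable
+///   if (x == x) ...    // not warned: self-comparison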
+void Sema::CheckFloatComparison(SourceLocation loc, Expr* lex, Expr *rex) {
+ bool EmitWarning = true;
+
+ Expr* LeftExprSansParen = lex->IgnoreParens();
+ Expr* RightExprSansParen = rex->IgnoreParens();
+
+ // Special case: check for x == x (which is OK).
+ // Do not emit warnings for such cases.
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
+ if (DRL->getDecl() == DRR->getDecl())
+ EmitWarning = false;
+
+
+ // Special case: check for comparisons against literals that can be exactly
+ // represented by APFloat. In such cases, do not emit a warning. This
+ // is a heuristic: often comparison against such literals are used to
+ // detect if a value in a variable has not changed. This clearly can
+ // lead to false negatives.
+ if (EmitWarning) {
+ if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
+ if (FLL->isExact())
+ EmitWarning = false;
+ }
+ else
+ if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)){
+ if (FLR->isExact())
+ EmitWarning = false;
+ }
+ }
+
+ // Special case: do not warn about comparisons against calls to builtin
+ // functions (e.g., __builtin_inf()).
+ if (EmitWarning)
+ if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
+ if (CL->isBuiltinCall(Context))
+ EmitWarning = false;
+
+ if (EmitWarning)
+ if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
+ if (CR->isBuiltinCall(Context))
+ EmitWarning = false;
+
+ // Emit the diagnostic.
+ if (EmitWarning)
+ Diag(loc, diag::warn_floatingpoint_eq)
+ << lex->getSourceRange() << rex->getSourceRange();
+}
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
new file mode 100644
index 0000000..959154c
--- /dev/null
+++ b/lib/Sema/SemaDecl.cpp
@@ -0,0 +1,4415 @@
+//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "SemaInherit.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/SourceManager.h"
+// FIXME: layering (ideally, Sema shouldn't be dependent on Lex API's)
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <functional>
+using namespace clang;
+
+/// getDeclName - Return a pretty name for the specified decl if possible, or
+/// an empty string if not. This is used for pretty crash reporting.
+std::string Sema::getDeclName(DeclPtrTy d) {
+ Decl *D = d.getAs<Decl>();
+ if (NamedDecl *DN = dyn_cast_or_null<NamedDecl>(D))
+ return DN->getQualifiedNameAsString();
+ return "";
+}
+
+Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(DeclPtrTy Ptr) {
+ return DeclGroupPtrTy::make(DeclGroupRef(Ptr.getAs<Decl>()));
+}
+
+/// \brief If the identifier refers to a type name within this scope,
+/// return the declaration of that type.
+///
+/// This routine performs ordinary name lookup of the identifier II
+/// within the given scope, with optional C++ scope specifier SS, to
+/// determine whether the name refers to a type. If so, returns an
+/// opaque pointer (actually a QualType) corresponding to that
+/// type. Otherwise, returns NULL.
+///
+/// If name lookup results in an ambiguity, this routine will complain
+/// and then return NULL.
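+///
+/// For example (illustrative only), when the parser sees
+/// @code
+/// typedef int Length;
+/// Length l;
+/// @endcode
+/// it consults getTypeName for the identifier 'Length' to learn that the
+/// second declaration begins with a type name.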
+Sema::TypeTy *Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, const CXXScopeSpec *SS) {
+ // C++ [temp.res]p3:
+ // A qualified-id that refers to a type and in which the
+ // nested-name-specifier depends on a template-parameter (14.6.2)
+ // shall be prefixed by the keyword typename to indicate that the
+ // qualified-id denotes a type, forming an
+ // elaborated-type-specifier (7.1.5.3).
+ //
+ // We therefore do not perform any name lookup if the result would
+ // refer to a member of an unknown specialization.
+ if (SS && isUnknownSpecialization(*SS))
+ return 0;
+
+ LookupResult Result
+ = LookupParsedName(S, SS, &II, LookupOrdinaryName, false, false);
+
+ NamedDecl *IIDecl = 0;
+ switch (Result.getKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::FoundOverloaded:
+ return 0;
+
+ case LookupResult::AmbiguousBaseSubobjectTypes:
+ case LookupResult::AmbiguousBaseSubobjects:
+ case LookupResult::AmbiguousReference: {
+ // Look to see if we have a type anywhere in the list of results.
+ for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
+ Res != ResEnd; ++Res) {
+ if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res)) {
+ if (!IIDecl ||
+ (*Res)->getLocation().getRawEncoding() <
+ IIDecl->getLocation().getRawEncoding())
+ IIDecl = *Res;
+ }
+ }
+
+ if (!IIDecl) {
+ // None of the entities we found is a type, so there is no way
+ // to even assume that the result is a type. In this case, don't
+ // complain about the ambiguity. The parser will either try to
+ // perform this lookup again (e.g., as an object name), which
+ // will produce the ambiguity, or will complain that it expected
+ // a type name.
+ Result.Destroy();
+ return 0;
+ }
+
+ // We found a type within the ambiguous lookup; diagnose the
+ // ambiguity and then return that type. This might be the right
+ // answer, or it might not be, but it suppresses any attempt to
+ // perform the name lookup again.
+ DiagnoseAmbiguousLookup(Result, DeclarationName(&II), NameLoc);
+ break;
+ }
+
+ case LookupResult::Found:
+ IIDecl = Result.getAsDecl();
+ break;
+ }
+
+ if (IIDecl) {
+ QualType T;
+
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
+ // Check whether we can use this type
+ (void)DiagnoseUseOfDecl(IIDecl, NameLoc);
+
+ if (getLangOptions().CPlusPlus) {
+ // C++ [temp.local]p2:
+ // Within the scope of a class template specialization or
+ // partial specialization, when the injected-class-name is
+ // not followed by a <, it is equivalent to the
+ // injected-class-name followed by the template-arguments
+ // of the class template specialization or partial
+ // specialization enclosed in <>.
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD))
+ if (RD->isInjectedClassName())
+ if (ClassTemplateDecl *Template = RD->getDescribedClassTemplate())
+ T = Template->getInjectedClassNameType(Context);
+ }
+
+ if (T.isNull())
+ T = Context.getTypeDeclType(TD);
+ } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
+ // Check whether we can use this interface.
+ (void)DiagnoseUseOfDecl(IIDecl, NameLoc);
+
+ T = Context.getObjCInterfaceType(IDecl);
+ } else
+ return 0;
+
+ if (SS)
+ T = getQualifiedNameType(*SS, T);
+
+ return T.getAsOpaquePtr();
+ }
+
+ return 0;
+}
+
+/// isTagName() - This method is called *for error recovery purposes only*
+/// to determine if the specified name is a valid tag name ("struct foo"). If
+/// so, this returns the TST for the tag corresponding to it (TST_enum,
+/// TST_union, TST_struct, TST_class). This is used to diagnose cases in C
+/// where the user forgot to specify the tag.
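+///
+/// For example (illustrative only), in C:
+/// @code
+/// struct S { int x; };
+/// S s;  // error; isTagName helps diagnose the missing 'struct' tag
+/// @endcode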
+DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
+ // Do a tag name lookup in this scope.
+ LookupResult R = LookupName(S, &II, LookupTagName, false, false);
+ if (R.getKind() == LookupResult::Found)
+ if (const TagDecl *TD = dyn_cast<TagDecl>(R.getAsDecl())) {
+ switch (TD->getTagKind()) {
+ case TagDecl::TK_struct: return DeclSpec::TST_struct;
+ case TagDecl::TK_union: return DeclSpec::TST_union;
+ case TagDecl::TK_class: return DeclSpec::TST_class;
+ case TagDecl::TK_enum: return DeclSpec::TST_enum;
+ }
+ }
+
+ return DeclSpec::TST_unspecified;
+}
+
+
+
+DeclContext *Sema::getContainingDC(DeclContext *DC) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) {
+ // A C++ out-of-line method will return to the file declaration context.
+ if (MD->isOutOfLineDefinition())
+ return MD->getLexicalDeclContext();
+
+ // A C++ inline method is parsed *after* the topmost class it was declared
+ // in is fully parsed (it's "complete").
+ // The parsing of a C++ inline method happens at the declaration context of
+ // the topmost (non-nested) class it is lexically declared in.
+ assert(isa<CXXRecordDecl>(MD->getParent()) && "C++ method not in Record.");
+ DC = MD->getParent();
+ while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
+ DC = RD;
+
+ // Return the declaration context of the topmost class the inline method is
+ // declared in.
+ return DC;
+ }
+
+ if (isa<ObjCMethodDecl>(DC))
+ return Context.getTranslationUnitDecl();
+
+ return DC->getLexicalParent();
+}
+
+void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
+ assert(getContainingDC(DC) == CurContext &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = DC;
+ S->setEntity(DC);
+}
+
+void Sema::PopDeclContext() {
+ assert(CurContext && "DeclContext imbalance!");
+
+ CurContext = getContainingDC(CurContext);
+}
+
+/// \brief Determine whether we allow overloading of the function
+/// PrevDecl with another declaration.
+///
+/// This routine determines whether overloading is possible, not
+/// whether some new function is actually an overload. It will return
+/// true in C++ (where we can always provide overloads) or, as an
+/// extension, in C when the previous function is already an
+/// overloaded function declaration or has the "overloadable"
+/// attribute.
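+///
+/// For example (illustrative only; 'my_abs' is a hypothetical function),
+/// the following is accepted in C:
+///
+///   float my_abs(float) __attribute__((overloadable));
+///   int my_abs(int) __attribute__((overloadable));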
+static bool AllowOverloadingOfFunction(Decl *PrevDecl, ASTContext &Context) {
+ if (Context.getLangOptions().CPlusPlus)
+ return true;
+
+ if (isa<OverloadedFunctionDecl>(PrevDecl))
+ return true;
+
+ return PrevDecl->getAttr<OverloadableAttr>() != 0;
+}
+
+/// Add this decl to the scope shadowed decl chains.
+void Sema::PushOnScopeChains(NamedDecl *D, Scope *S) {
+ // Move up the scope chain until we find the nearest enclosing
+ // non-transparent context. The declaration will be introduced into this
+ // scope.
+ while (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext())
+ S = S->getParent();
+
+ S->AddDecl(DeclPtrTy::make(D));
+
+ // Add scoped declarations into their context, so that they can be
+ // found later. Declarations without a context won't be inserted
+ // into any context.
+ CurContext->addDecl(Context, D);
+
+ // C++ [basic.scope]p4:
+ // -- exactly one declaration shall declare a class name or
+ // enumeration name that is not a typedef name and the other
+ // declarations shall all refer to the same object or
+ // enumerator, or all refer to functions and function templates;
+ // in this case the class name or enumeration name is hidden.
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // We are pushing the name of a tag (enum or class).
+ if (CurContext->getLookupContext()
+ == TD->getDeclContext()->getLookupContext()) {
+ // We're pushing the tag into the current context, which might
+ // require some reshuffling in the identifier resolver.
+ IdentifierResolver::iterator
+ I = IdResolver.begin(TD->getDeclName()),
+ IEnd = IdResolver.end();
+ if (I != IEnd && isDeclInScope(*I, CurContext, S)) {
+ NamedDecl *PrevDecl = *I;
+ for (; I != IEnd && isDeclInScope(*I, CurContext, S);
+ PrevDecl = *I, ++I) {
+ if (TD->declarationReplaces(*I)) {
+ // This is a redeclaration. Remove it from the chain and
+ // break out, so that we'll add in the shadowed
+ // declaration.
+ S->RemoveDecl(DeclPtrTy::make(*I));
+ if (PrevDecl == *I) {
+ IdResolver.RemoveDecl(*I);
+ IdResolver.AddDecl(TD);
+ return;
+ } else {
+ IdResolver.RemoveDecl(*I);
+ break;
+ }
+ }
+ }
+
+ // There is already a declaration with the same name in the same
+ // scope, which is not a tag declaration. It must be found
+ // before we find the new declaration, so insert the new
+ // declaration at the end of the chain.
+ IdResolver.AddShadowedDecl(TD, PrevDecl);
+
+ return;
+ }
+ }
+ } else if (isa<FunctionDecl>(D) &&
+ AllowOverloadingOfFunction(D, Context)) {
+ // We are pushing the name of a function, which might be an
+ // overloaded name.
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ IdentifierResolver::iterator Redecl
+ = std::find_if(IdResolver.begin(FD->getDeclName()),
+ IdResolver.end(),
+ std::bind1st(std::mem_fun(&NamedDecl::declarationReplaces),
+ FD));
+ if (Redecl != IdResolver.end() &&
+ S->isDeclScope(DeclPtrTy::make(*Redecl))) {
+ // There is already a declaration of a function on our
+ // IdResolver chain. Replace it with this declaration.
+ S->RemoveDecl(DeclPtrTy::make(*Redecl));
+ IdResolver.RemoveDecl(*Redecl);
+ }
+ } else if (isa<ObjCInterfaceDecl>(D)) {
+ // We're pushing an Objective-C interface into the current
+ // context. If there is already an alias declaration, remove it first.
+ for (IdentifierResolver::iterator
+ I = IdResolver.begin(D->getDeclName()), IEnd = IdResolver.end();
+ I != IEnd; ++I) {
+ if (isa<ObjCCompatibleAliasDecl>(*I)) {
+ S->RemoveDecl(DeclPtrTy::make(*I));
+ IdResolver.RemoveDecl(*I);
+ break;
+ }
+ }
+ }
+
+ IdResolver.AddDecl(D);
+}
+
+void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
+ if (S->decl_empty()) return;
+ assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
+ "Scope shouldn't contain decls!");
+
+ for (Scope::decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I != E; ++I) {
+ Decl *TmpD = (*I).getAs<Decl>();
+ assert(TmpD && "This decl didn't get pushed??");
+
+ assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
+ NamedDecl *D = cast<NamedDecl>(TmpD);
+
+ if (!D->getDeclName()) continue;
+
+ // Remove this name from our lexical scope.
+ IdResolver.RemoveDecl(D);
+ }
+}
+
+/// getObjCInterfaceDecl - Look up a class declaration in the scope;
+/// return 0 if none is found.
+ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *Id) {
+ // The third "scope" argument is 0 since we aren't enabling lazy built-in
+ // creation from this context.
+ NamedDecl *IDecl = LookupName(TUScope, Id, LookupOrdinaryName);
+
+ return dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+}
+
+/// getNonFieldDeclScope - Retrieves the innermost scope, starting
+/// from S, where a non-field would be declared. This routine copes
+/// with the difference between C and C++ scoping rules in structs and
+/// unions. For example, the following code is well-formed in C but
+/// ill-formed in C++:
+/// @code
+/// struct S6 {
+/// enum { BAR } e;
+/// };
+///
+/// void test_S6() {
+/// struct S6 a;
+/// a.e = BAR;
+/// }
+/// @endcode
+/// For the declaration of BAR, this routine will return a different
+/// scope. The scope S will be the scope of the unnamed enumeration
+/// within S6. In C++, this routine will return the scope associated
+/// with S6, because the enumeration's scope is a transparent
+/// context but structures can contain non-field names. In C, this
+/// routine will return the translation unit scope, since the
+/// enumeration's scope is a transparent context and structures cannot
+/// contain non-field names.
+Scope *Sema::getNonFieldDeclScope(Scope *S) {
+ while (((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext()) ||
+ (S->isClassScope() && !getLangOptions().CPlusPlus))
+ S = S->getParent();
+ return S;
+}
+
+void Sema::InitBuiltinVaListType() {
+ if (!Context.getBuiltinVaListType().isNull())
+ return;
+
+ IdentifierInfo *VaIdent = &Context.Idents.get("__builtin_va_list");
+ NamedDecl *VaDecl = LookupName(TUScope, VaIdent, LookupOrdinaryName);
+ TypedefDecl *VaTypedef = cast<TypedefDecl>(VaDecl);
+ Context.setBuiltinVaListType(Context.getTypedefType(VaTypedef));
+}
+
+/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
+/// file scope. Lazily create a decl for it. ForRedeclaration is true
+/// if we're creating this built-in in anticipation of redeclaring the
+/// built-in.
+NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
+ Scope *S, bool ForRedeclaration,
+ SourceLocation Loc) {
+ Builtin::ID BID = (Builtin::ID)bid;
+
+ if (Context.BuiltinInfo.hasVAListUse(BID))
+ InitBuiltinVaListType();
+
+ Builtin::Context::GetBuiltinTypeError Error;
+ QualType R = Context.BuiltinInfo.GetBuiltinType(BID, Context, Error);
+ switch (Error) {
+ case Builtin::Context::GE_None:
+ // Okay
+ break;
+
+ case Builtin::Context::GE_Missing_FILE:
+ if (ForRedeclaration)
+ Diag(Loc, diag::err_implicit_decl_requires_stdio)
+ << Context.BuiltinInfo.GetName(BID);
+ return 0;
+ }
+
+ if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ Diag(Loc, diag::ext_implicit_lib_function_decl)
+ << Context.BuiltinInfo.GetName(BID)
+ << R;
+ if (Context.BuiltinInfo.getHeaderName(BID) &&
+ Diags.getDiagnosticLevel(diag::ext_implicit_lib_function_decl)
+ != Diagnostic::Ignored)
+ Diag(Loc, diag::note_please_include_header)
+ << Context.BuiltinInfo.getHeaderName(BID)
+ << Context.BuiltinInfo.GetName(BID);
+ }
+
+ FunctionDecl *New = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ Loc, II, R,
+ FunctionDecl::Extern, false,
+ /*hasPrototype=*/true);
+ New->setImplicit();
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ if (FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i)
+ Params.push_back(ParmVarDecl::Create(Context, New, SourceLocation(), 0,
+ FT->getArgType(i), VarDecl::None, 0));
+ New->setParams(Context, Params.data(), Params.size());
+ }
+
+ AddKnownFunctionAttributes(New);
+
+ // TUScope is the translation-unit scope to insert this function into.
+ // FIXME: This is hideous. We need to teach PushOnScopeChains to
+ // relate Scopes to DeclContexts, and probably eliminate CurContext
+ // entirely, but we're not there yet.
+ DeclContext *SavedContext = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+ PushOnScopeChains(New, TUScope);
+ CurContext = SavedContext;
+ return New;
+}
+
+/// GetStdNamespace - This method gets the C++ "std" namespace. This is where
+/// everything from the standard library is defined.
+NamespaceDecl *Sema::GetStdNamespace() {
+ if (!StdNamespace) {
+ IdentifierInfo *StdIdent = &PP.getIdentifierTable().get("std");
+ DeclContext *Global = Context.getTranslationUnitDecl();
+ Decl *Std = LookupQualifiedName(Global, StdIdent, LookupNamespaceName);
+ StdNamespace = dyn_cast_or_null<NamespaceDecl>(Std);
+ }
+ return StdNamespace;
+}
+
+/// MergeTypeDefDecl - We just parsed a typedef 'New' which has the
+/// same name and scope as a previous declaration 'Old'. Figure out
+/// how to resolve this situation, merging decls or emitting
+/// diagnostics as appropriate. If there was an error, set New to be invalid.
+///
+void Sema::MergeTypeDefDecl(TypedefDecl *New, Decl *OldD) {
+ // If either decl is known invalid already, set the new one to be invalid and
+ // don't bother doing any merging checks.
+ if (New->isInvalidDecl() || OldD->isInvalidDecl())
+ return New->setInvalidDecl();
+
+ bool objc_types = false;
+
+ // Allow multiple definitions for ObjC built-in typedefs.
+ // FIXME: Verify the underlying types are equivalent!
+ if (getLangOptions().ObjC1) {
+ const IdentifierInfo *TypeID = New->getIdentifier();
+ switch (TypeID->getLength()) {
+ default: break;
+ case 2:
+ if (!TypeID->isStr("id"))
+ break;
+ Context.setObjCIdType(Context.getTypeDeclType(New));
+ objc_types = true;
+ break;
+ case 5:
+ if (!TypeID->isStr("Class"))
+ break;
+ Context.setObjCClassType(Context.getTypeDeclType(New));
+ return;
+ case 3:
+ if (!TypeID->isStr("SEL"))
+ break;
+ Context.setObjCSelType(Context.getTypeDeclType(New));
+ return;
+ case 8:
+ if (!TypeID->isStr("Protocol"))
+ break;
+ Context.setObjCProtoType(New->getUnderlyingType());
+ return;
+ }
+ // Fall through - the typedef name was not a builtin type.
+ }
+ // Verify the old decl was also a type.
+ TypeDecl *Old = dyn_cast<TypeDecl>(OldD);
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ if (OldD->getLocation().isValid())
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Determine the "old" type we'll use for checking and diagnostics.
+ QualType OldType;
+ if (TypedefDecl *OldTypedef = dyn_cast<TypedefDecl>(Old))
+ OldType = OldTypedef->getUnderlyingType();
+ else
+ OldType = Context.getTypeDeclType(Old);
+
+ // If the typedef types are not identical, reject them in all languages and
+ // with any extensions enabled.
+
+ if (OldType != New->getUnderlyingType() &&
+ Context.getCanonicalType(OldType) !=
+ Context.getCanonicalType(New->getUnderlyingType())) {
+ Diag(New->getLocation(), diag::err_redefinition_different_typedef)
+ << New->getUnderlyingType() << OldType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ if (objc_types || getLangOptions().Microsoft)
+ return;
+
+ // C++ [dcl.typedef]p2:
+ // In a given non-class scope, a typedef specifier can be used to
+ // redefine the name of any type declared in that scope to refer
+ // to the type to which it already refers.
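+ //
+ // For example (illustrative only), this is accepted in C++ at
+ // namespace scope:
+ //
+ //   typedef int I;
+ //   typedef int I;  // OK: redeclares 'I' to refer to the same type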
+ if (getLangOptions().CPlusPlus) {
+ if (!isa<CXXRecordDecl>(CurContext))
+ return;
+ Diag(New->getLocation(), diag::err_redefinition)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // If we have a redefinition of a typedef in C, emit a warning. This warning
+ // is normally mapped to an error, but can be controlled with
+ // -Wtypedef-redefinition. If the original was in a system header,
+ // don't emit this, for compatibility with GCC.
+ if (PP.getDiagnostics().getSuppressSystemWarnings() &&
+ Context.getSourceManager().isInSystemHeader(Old->getLocation()))
+ return;
+
+ Diag(New->getLocation(), diag::warn_redefinition_of_typedef)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return;
+}
+
+/// DeclHasAttr - returns true if the decl already has the target
+/// attribute.
+static bool DeclHasAttr(const Decl *decl, const Attr *target) {
+ for (const Attr *attr = decl->getAttrs(); attr; attr = attr->getNext())
+ if (attr->getKind() == target->getKind())
+ return true;
+
+ return false;
+}
+
+/// MergeAttributes - append attributes from the Old decl to the New one.
+static void MergeAttributes(Decl *New, Decl *Old, ASTContext &C) {
+ for (const Attr *attr = Old->getAttrs(); attr; attr = attr->getNext()) {
+ if (!DeclHasAttr(New, attr) && attr->isMerged()) {
+ Attr *NewAttr = attr->clone(C);
+ NewAttr->setInherited(true);
+ New->addAttr(NewAttr);
+ }
+ }
+}
+
+/// Used in MergeFunctionDecl to keep track of function parameters in
+/// C.
+struct GNUCompatibleParamWarning {
+ ParmVarDecl *OldParm;
+ ParmVarDecl *NewParm;
+ QualType PromotedType;
+};
+
+/// MergeFunctionDecl - We just parsed a function 'New' from
+/// declarator D which has the same name and scope as a previous
+/// declaration 'Old'. Figure out how to resolve this situation,
+/// merging decls or emitting diagnostics as appropriate.
+///
+/// In C++, New and Old must be declarations that are not
+/// overloaded. Use IsOverload to determine whether New and Old are
+/// overloaded, and to select the Old declaration that New should be
+/// merged with.
+///
+/// Returns true if there was an error, false otherwise.
+bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
+ assert(!isa<OverloadedFunctionDecl>(OldD) &&
+ "Cannot merge with an overloaded function declaration");
+
+ // Verify the old decl was also a function.
+ FunctionDecl *Old = dyn_cast<FunctionDecl>(OldD);
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+
+ // Determine whether the previous declaration was a definition,
+ // implicit declaration, or a declaration.
+ diag::kind PrevDiag;
+ if (Old->isThisDeclarationADefinition())
+ PrevDiag = diag::note_previous_definition;
+ else if (Old->isImplicit())
+ PrevDiag = diag::note_previous_implicit_declaration;
+ else
+ PrevDiag = diag::note_previous_declaration;
+
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+
+ if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
+ New->getStorageClass() == FunctionDecl::Static &&
+ Old->getStorageClass() != FunctionDecl::Static) {
+ Diag(New->getLocation(), diag::err_static_non_static)
+ << New;
+ Diag(Old->getLocation(), PrevDiag);
+ return true;
+ }
+
+ if (getLangOptions().CPlusPlus) {
+ // (C++98 13.1p2):
+ // Certain function declarations cannot be overloaded:
+ // -- Function declarations that differ only in the return type
+ // cannot be overloaded.
+ QualType OldReturnType
+ = cast<FunctionType>(OldQType.getTypePtr())->getResultType();
+ QualType NewReturnType
+ = cast<FunctionType>(NewQType.getTypePtr())->getResultType();
+ if (OldReturnType != NewReturnType) {
+ Diag(New->getLocation(), diag::err_ovl_diff_return_type);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ const CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ const CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod &&
+ OldMethod->getLexicalDeclContext() ==
+ NewMethod->getLexicalDeclContext()) {
+ // -- Member function declarations with the same name and the
+ // same parameter types cannot be overloaded if any of them
+ // is a static member function declaration.
+ if (OldMethod->isStatic() || NewMethod->isStatic()) {
+ Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ // C++ [class.mem]p1:
+ // [...] A member shall not be declared twice in the
+ // member-specification, except that a nested class or member
+ // class template can be declared and then later defined.
+ unsigned NewDiag;
+ if (isa<CXXConstructorDecl>(OldMethod))
+ NewDiag = diag::err_constructor_redeclared;
+ else if (isa<CXXDestructorDecl>(NewMethod))
+ NewDiag = diag::err_destructor_redeclared;
+ else if (isa<CXXConversionDecl>(NewMethod))
+ NewDiag = diag::err_conv_function_redeclared;
+ else
+ NewDiag = diag::err_member_redeclared;
+
+ Diag(New->getLocation(), NewDiag);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ }
+
+ // (C++98 8.3.5p3):
+ // All declarations for a function shall agree exactly in both the
+ // return type and the parameter-type-list.
+ if (OldQType == NewQType)
+ return MergeCompatibleFunctionDecls(New, Old);
+
+ // Fall through for conflicting redeclarations and redefinitions.
+ }
+
+ // C: Function types need to be compatible, not identical. This handles
+ // duplicate function decls like "void f(int); void f(enum X);" properly.
+ if (!getLangOptions().CPlusPlus &&
+ Context.typesAreCompatible(OldQType, NewQType)) {
+ const FunctionType *OldFuncType = OldQType->getAsFunctionType();
+ const FunctionType *NewFuncType = NewQType->getAsFunctionType();
+ const FunctionProtoType *OldProto = 0;
+ if (isa<FunctionNoProtoType>(NewFuncType) &&
+ (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
+ // The old declaration provided a function prototype, but the
+ // new declaration does not. Merge in the prototype.
+ assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
+ llvm::SmallVector<QualType, 16> ParamTypes(OldProto->arg_type_begin(),
+ OldProto->arg_type_end());
+ NewQType = Context.getFunctionType(NewFuncType->getResultType(),
+ ParamTypes.data(), ParamTypes.size(),
+ OldProto->isVariadic(),
+ OldProto->getTypeQuals());
+ New->setType(NewQType);
+ New->setHasInheritedPrototype();
+
+ // Synthesize a parameter for each argument type.
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+ for (FunctionProtoType::arg_type_iterator
+ ParamType = OldProto->arg_type_begin(),
+ ParamEnd = OldProto->arg_type_end();
+ ParamType != ParamEnd; ++ParamType) {
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, New,
+ SourceLocation(), 0,
+ *ParamType, VarDecl::None,
+ 0);
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+
+ New->setParams(Context, Params.data(), Params.size());
+ }
+
+ return MergeCompatibleFunctionDecls(New, Old);
+ }
+
+ // GNU C permits a K&R definition to follow a prototype declaration
+ // if the declared types of the parameters in the K&R definition
+ // match the types in the prototype declaration, even when the
+ // promoted types of the parameters from the K&R definition differ
+ // from the types in the prototype. GCC then keeps the types from
+ // the prototype.
+ //
+ // If a variadic prototype is followed by a non-variadic K&R definition,
+ // the K&R definition becomes variadic. This is sort of an edge case, but
+ // it's legal per the standard depending on how you read C99 6.7.5.3p15 and
+ // C99 6.9.1p8.
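+ //
+ // For example (illustrative only; 'f' is a hypothetical function):
+ //
+ //   void f(short x);        // prototype
+ //   void f(x) short x; { }  // K&R definition; the promoted type of 'x' is
+ //                           // 'int', but the declared types match, so this
+ //                           // is accepted as a GNU extension with a warning.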
+ if (!getLangOptions().CPlusPlus &&
+ Old->hasPrototype() && !New->hasPrototype() &&
+ New->getType()->getAsFunctionProtoType() &&
+ Old->getNumParams() == New->getNumParams()) {
+ llvm::SmallVector<QualType, 16> ArgTypes;
+ llvm::SmallVector<GNUCompatibleParamWarning, 16> Warnings;
+ const FunctionProtoType *OldProto
+ = Old->getType()->getAsFunctionProtoType();
+ const FunctionProtoType *NewProto
+ = New->getType()->getAsFunctionProtoType();
+
+ // Determine whether this is the GNU C extension.
+ QualType MergedReturn = Context.mergeTypes(OldProto->getResultType(),
+ NewProto->getResultType());
+ bool LooseCompatible = !MergedReturn.isNull();
+ for (unsigned Idx = 0, End = Old->getNumParams();
+ LooseCompatible && Idx != End; ++Idx) {
+ ParmVarDecl *OldParm = Old->getParamDecl(Idx);
+ ParmVarDecl *NewParm = New->getParamDecl(Idx);
+ if (Context.typesAreCompatible(OldParm->getType(),
+ NewProto->getArgType(Idx))) {
+ ArgTypes.push_back(NewParm->getType());
+ } else if (Context.typesAreCompatible(OldParm->getType(),
+ NewParm->getType())) {
+ GNUCompatibleParamWarning Warn
+ = { OldParm, NewParm, NewProto->getArgType(Idx) };
+ Warnings.push_back(Warn);
+ ArgTypes.push_back(NewParm->getType());
+ } else
+ LooseCompatible = false;
+ }
+
+ if (LooseCompatible) {
+ for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) {
+ Diag(Warnings[Warn].NewParm->getLocation(),
+ diag::ext_param_promoted_not_compatible_with_prototype)
+ << Warnings[Warn].PromotedType
+ << Warnings[Warn].OldParm->getType();
+ Diag(Warnings[Warn].OldParm->getLocation(),
+ diag::note_previous_declaration);
+ }
+
+ New->setType(Context.getFunctionType(MergedReturn, &ArgTypes[0],
+ ArgTypes.size(),
+ OldProto->isVariadic(), 0));
+ return MergeCompatibleFunctionDecls(New, Old);
+ }
+
+ // Fall through to diagnose conflicting types.
+ }
+
+ // A function that has already been declared has been redeclared or defined
+ // with a different type; show an appropriate diagnostic.
+ if (unsigned BuiltinID = Old->getBuiltinID(Context)) {
+ // The user has declared a builtin function with an incompatible
+ // signature.
+ if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
+ // The function the user is redeclaring is a library-defined
+ // function like 'malloc' or 'printf'. Warn about the
+ // redeclaration, then pretend that we don't know about this
+ // library built-in.
+ Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
+ Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
+ << Old << Old->getType();
+ New->getIdentifier()->setBuiltinID(Builtin::NotBuiltin);
+ Old->setInvalidDecl();
+ return false;
+ }
+
+ PrevDiag = diag::note_previous_builtin_declaration;
+ }
+
+ Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+}
+
+/// \brief Completes the merge of two function declarations that are
+/// known to be compatible.
+///
+/// This routine handles the merging of attributes and other
+/// properties of function declarations from the old declaration to
+/// the new declaration, once we know that New is in fact a
+/// redeclaration of Old.
+///
+/// \returns false
+bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old) {
+ // Merge the attributes
+ MergeAttributes(New, Old, Context);
+
+ // Merge the storage class.
+ if (Old->getStorageClass() != FunctionDecl::Extern)
+ New->setStorageClass(Old->getStorageClass());
+
+ // Merge "inline"
+ if (Old->isInline())
+ New->setInline(true);
+
+ // If this function declaration by itself qualifies as a C99 inline
+ // definition (C99 6.7.4p6), but the previous definition did not,
+ // then the function is not a C99 inline definition.
+ if (New->isC99InlineDefinition() && !Old->isC99InlineDefinition())
+ New->setC99InlineDefinition(false);
+ else if (Old->isC99InlineDefinition() && !New->isC99InlineDefinition()) {
+ // Mark all preceding definitions as not being C99 inline definitions.
+ for (const FunctionDecl *Prev = Old; Prev;
+ Prev = Prev->getPreviousDeclaration())
+ const_cast<FunctionDecl *>(Prev)->setC99InlineDefinition(false);
+ }
+
+ // Merge "pure" flag.
+ if (Old->isPure())
+ New->setPure();
+
+ // Merge the "deleted" flag.
+ if (Old->isDeleted())
+ New->setDeleted();
+
+ if (getLangOptions().CPlusPlus)
+ return MergeCXXFunctionDecl(New, Old);
+
+ return false;
+}
+
+/// MergeVarDecl - We just parsed a variable 'New' which has the same name
+/// and scope as a previous declaration 'Old'. Figure out how to resolve this
+/// situation, merging decls or emitting diagnostics as appropriate.
+///
+/// Tentative definition rules (C99 6.9.2p2) are checked by
+/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
+/// definitions here, since the initializer hasn't been attached.
+///
+void Sema::MergeVarDecl(VarDecl *New, Decl *OldD) {
+ // If either decl is invalid, make sure the new one is marked invalid and
+ // don't do any other checking.
+ if (New->isInvalidDecl() || OldD->isInvalidDecl())
+ return New->setInvalidDecl();
+
+ // Verify the old decl was also a variable.
+ VarDecl *Old = dyn_cast<VarDecl>(OldD);
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ MergeAttributes(New, Old, Context);
+
+ // Merge the types
+ QualType MergedT;
+ if (getLangOptions().CPlusPlus) {
+ if (Context.hasSameType(New->getType(), Old->getType()))
+ MergedT = New->getType();
+ } else {
+ MergedT = Context.mergeTypes(New->getType(), Old->getType());
+ }
+ if (MergedT.isNull()) {
+ Diag(New->getLocation(), diag::err_redefinition_different_type)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ New->setType(MergedT);
+
+ // C99 6.2.2p4: Check if we have a static decl followed by a non-static.
+ if (New->getStorageClass() == VarDecl::Static &&
+ (Old->getStorageClass() == VarDecl::None || Old->hasExternalStorage())) {
+ Diag(New->getLocation(), diag::err_static_non_static) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible,23) if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier at
+ // the later declaration is the same as the linkage specified at
+ // the prior declaration. If no prior declaration is visible, or
+ // if the prior declaration specifies no linkage, then the
+ // identifier has external linkage.
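+ //
+ // For example (illustrative only), at file scope:
+ //
+ //   static int x;
+ //   extern int x;  // OK: 'x' keeps its prior internal linkage
+ //   static int y;
+ //   int y;         // diagnosed below: non-static declaration follows static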
+ if (New->hasExternalStorage() && Old->hasLinkage())
+ /* Okay */;
+ else if (New->getStorageClass() != VarDecl::Static &&
+ Old->getStorageClass() == VarDecl::Static) {
+ Diag(New->getLocation(), diag::err_non_static_static) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
+
+ // FIXME: The test for external storage here seems wrong? We still
+ // need to check for mismatches.
+ if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
+ // Don't complain about out-of-line definitions of static members.
+ !(Old->getLexicalDeclContext()->isRecord() &&
+ !New->getLexicalDeclContext()->isRecord())) {
+ Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ if (New->isThreadSpecified() && !Old->isThreadSpecified()) {
+ Diag(New->getLocation(), diag::err_thread_non_thread) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ } else if (!New->isThreadSpecified() && Old->isThreadSpecified()) {
+ Diag(New->getLocation(), diag::err_non_thread_thread) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ }
+
+ // Keep a chain of previous declarations.
+ New->setPreviousDeclaration(Old);
+}
+
+/// CheckParmsForFunctionDef - Check that the parameters of the given
+/// function are appropriate for the definition of a function. This
+/// takes care of any checks that cannot be performed on the
+/// declaration itself, e.g., that the types of each of the function
+/// parameters are complete.
+bool Sema::CheckParmsForFunctionDef(FunctionDecl *FD) {
+ bool HasInvalidParm = false;
+ for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+
+ // C99 6.7.5.3p4: the parameters in a parameter type list in a
+ // function declarator that is part of a function definition of
+ // that function shall not have incomplete type.
+ //
+ // This is also C++ [dcl.fct]p6.
+ if (!Param->isInvalidDecl() &&
+ RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Param->setInvalidDecl();
+ HasInvalidParm = true;
+ }
+
+ // C99 6.9.1p5: If the declarator includes a parameter type list, the
+ // declaration of each parameter shall include an identifier.
+ if (Param->getIdentifier() == 0 &&
+ !Param->isImplicit() &&
+ !getLangOptions().CPlusPlus)
+ Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ }
+
+ return HasInvalidParm;
+}
+
+/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+/// no declarator (e.g. "struct foo;") is parsed.
+Sema::DeclPtrTy Sema::ParsedFreeStandingDeclSpec(Scope *S, DeclSpec &DS) {
+ // FIXME: Error on auto/register at file scope
+ // FIXME: Error on inline/virtual/explicit
+ // FIXME: Error on invalid restrict
+ // FIXME: Warn on useless __thread
+ // FIXME: Warn on useless const/volatile
+ // FIXME: Warn on useless static/extern/typedef/private_extern/mutable
+ // FIXME: Warn on useless attributes
+ TagDecl *Tag = 0;
+ if (DS.getTypeSpecType() == DeclSpec::TST_class ||
+ DS.getTypeSpecType() == DeclSpec::TST_struct ||
+ DS.getTypeSpecType() == DeclSpec::TST_union ||
+ DS.getTypeSpecType() == DeclSpec::TST_enum) {
+ if (!DS.getTypeRep()) // We probably had an error
+ return DeclPtrTy();
+
+ Tag = dyn_cast<TagDecl>(static_cast<Decl *>(DS.getTypeRep()));
+ }
+
+ if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
+ if (!Record->getDeclName() && Record->isDefinition() &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
+ if (getLangOptions().CPlusPlus ||
+ Record->getDeclContext()->isRecord())
+ return BuildAnonymousStructOrUnion(S, DS, Record);
+
+ Diag(DS.getSourceRange().getBegin(), diag::err_no_declarators)
+ << DS.getSourceRange();
+ }
+
+ // Microsoft allows unnamed struct/union fields. Don't complain
+ // about them.
+ // FIXME: Should we support Microsoft's extensions in this area?
+ if (Record->getDeclName() && getLangOptions().Microsoft)
+ return DeclPtrTy::make(Tag);
+ }
+
+ if (!DS.isMissingDeclaratorOk() &&
+ DS.getTypeSpecType() != DeclSpec::TST_error) {
+ // Warn about typedefs of enums without names, since this is an
+ // extension in both Microsoft and GNU.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef &&
+ Tag && isa<EnumDecl>(Tag)) {
+ Diag(DS.getSourceRange().getBegin(), diag::ext_typedef_without_a_name)
+ << DS.getSourceRange();
+ return DeclPtrTy::make(Tag);
+ }
+
+ Diag(DS.getSourceRange().getBegin(), diag::err_no_declarators)
+ << DS.getSourceRange();
+ return DeclPtrTy();
+ }
+
+ return DeclPtrTy::make(Tag);
+}
+
+/// InjectAnonymousStructOrUnionMembers - Inject the members of the
+/// anonymous struct or union AnonRecord into the owning context Owner
+/// and scope S. This routine will be invoked just after we realize
+/// that an unnamed union or struct is actually an anonymous union or
+/// struct, e.g.,
+///
+/// @code
+/// union {
+/// int i;
+/// float f;
+/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
+ /// // f into the surrounding scope.
+/// @endcode
+///
+/// This routine is recursive, injecting the names of nested anonymous
+/// structs/unions into the owning context and scope as well.
+bool Sema::InjectAnonymousStructOrUnionMembers(Scope *S, DeclContext *Owner,
+ RecordDecl *AnonRecord) {
+ bool Invalid = false;
+ for (RecordDecl::field_iterator F = AnonRecord->field_begin(Context),
+ FEnd = AnonRecord->field_end(Context);
+ F != FEnd; ++F) {
+ if ((*F)->getDeclName()) {
+ NamedDecl *PrevDecl = LookupQualifiedName(Owner, (*F)->getDeclName(),
+ LookupOrdinaryName, true);
+ if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
+ // C++ [class.union]p2:
+ // The names of the members of an anonymous union shall be
+ // distinct from the names of any other entity in the
+ // scope in which the anonymous union is declared.
+ unsigned diagKind
+ = AnonRecord->isUnion()? diag::err_anonymous_union_member_redecl
+ : diag::err_anonymous_struct_member_redecl;
+ Diag((*F)->getLocation(), diagKind)
+ << (*F)->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ Invalid = true;
+ } else {
+ // C++ [class.union]p2:
+ // For the purpose of name lookup, after the anonymous union
+ // definition, the members of the anonymous union are
+ // considered to have been defined in the scope in which the
+ // anonymous union is declared.
+ Owner->makeDeclVisibleInContext(Context, *F);
+ S->AddDecl(DeclPtrTy::make(*F));
+ IdResolver.AddDecl(*F);
+ }
+ } else if (const RecordType *InnerRecordType
+ = (*F)->getType()->getAsRecordType()) {
+ RecordDecl *InnerRecord = InnerRecordType->getDecl();
+ // Recurse even if an earlier member already made this declaration invalid,
+ // so nested members are still injected for name lookup.
+ if (InnerRecord->isAnonymousStructOrUnion() &&
+ InjectAnonymousStructOrUnionMembers(S, Owner, InnerRecord))
+ Invalid = true;
+ }
+ }
+
+ return Invalid;
+}
+
+/// ActOnAnonymousStructOrUnion - Handle the declaration of an
+/// anonymous structure or union. Anonymous unions are a C++ feature
+/// (C++ [class.union]) and a GNU C extension; anonymous structures
+/// are a GNU C and GNU C++ extension.
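+ ///
+ /// An illustrative example of both forms:
+ ///
+ /// @code
+ /// struct Outer {
+ ///   union { int i; float f; };   // anonymous union
+ ///   struct { int x; int y; };    // anonymous struct (GNU extension)
+ /// };
+ /// @endcode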
+Sema::DeclPtrTy Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
+ RecordDecl *Record) {
+ DeclContext *Owner = Record->getDeclContext();
+
+ // Diagnose whether this anonymous struct/union is an extension.
+ if (Record->isUnion() && !getLangOptions().CPlusPlus)
+ Diag(Record->getLocation(), diag::ext_anonymous_union);
+ else if (!Record->isUnion())
+ Diag(Record->getLocation(), diag::ext_anonymous_struct);
+
+ // C and C++ require different kinds of checks for anonymous
+ // structs/unions.
+ bool Invalid = false;
+ if (getLangOptions().CPlusPlus) {
+ const char* PrevSpec = 0;
+ // C++ [class.union]p3:
+ // Anonymous unions declared in a named namespace or in the
+ // global namespace shall be declared static.
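+ //
+ // For example, at namespace scope (illustrative only):
+ //   static union { int i; float f; };   // OK
+ //   union { long l; double d; };        // error: must be declared 'static'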
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
+ (isa<TranslationUnitDecl>(Owner) ||
+ (isa<NamespaceDecl>(Owner) &&
+ cast<NamespaceDecl>(Owner)->getDeclName()))) {
+ Diag(Record->getLocation(), diag::err_anonymous_union_not_static);
+ Invalid = true;
+
+ // Recover by adding 'static'.
+ DS.SetStorageClassSpec(DeclSpec::SCS_static, SourceLocation(), PrevSpec);
+ }
+ // C++ [class.union]p3:
+ // A storage class is not allowed in a declaration of an
+ // anonymous union in a class scope.
+ else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ isa<RecordDecl>(Owner)) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_anonymous_union_with_storage_spec);
+ Invalid = true;
+
+ // Recover by removing the storage specifier.
+ DS.SetStorageClassSpec(DeclSpec::SCS_unspecified, SourceLocation(),
+ PrevSpec);
+ }
+
+ // C++ [class.union]p2:
+ // The member-specification of an anonymous union shall only
+ // define non-static data members. [Note: nested types and
+ // functions cannot be declared within an anonymous union. ]
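+ //
+ // For example (ill-formed, illustrative only):
+ //   struct S {
+ //     union { struct T { int n; } t; };   // error: declares nested type 'T'
+ //     union { void g(); };                // error: declares a member function
+ //   };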
+ for (DeclContext::decl_iterator Mem = Record->decls_begin(Context),
+ MemEnd = Record->decls_end(Context);
+ Mem != MemEnd; ++Mem) {
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(*Mem)) {
+ // C++ [class.union]p3:
+ // An anonymous union shall not have private or protected
+ // members (clause 11).
+ if (FD->getAccess() == AS_protected || FD->getAccess() == AS_private) {
+ Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
+ << (int)Record->isUnion() << (int)(FD->getAccess() == AS_protected);
+ Invalid = true;
+ }
+ } else if ((*Mem)->isImplicit()) {
+ // Any implicit members are fine.
+ } else if (isa<TagDecl>(*Mem) && (*Mem)->getDeclContext() != Record) {
+ // This is a type that showed up in an
+ // elaborated-type-specifier inside the anonymous struct or
+ // union, but which actually declares a type outside of the
+ // anonymous struct or union. It's okay.
+ } else if (RecordDecl *MemRecord = dyn_cast<RecordDecl>(*Mem)) {
+ if (!MemRecord->isAnonymousStructOrUnion() &&
+ MemRecord->getDeclName()) {
+ // This is a nested type declaration.
+ Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ Invalid = true;
+ }
+ } else {
+ // We have something that isn't a non-static data
+ // member. Complain about it.
+ unsigned DK = diag::err_anonymous_record_bad_member;
+ if (isa<TypeDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_type;
+ else if (isa<FunctionDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_function;
+ else if (isa<VarDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_static;
+ Diag((*Mem)->getLocation(), DK)
+ << (int)Record->isUnion();
+ Invalid = true;
+ }
+ }
+ }
+
+ if (!Record->isUnion() && !Owner->isRecord()) {
+ Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
+ << (int)getLangOptions().CPlusPlus;
+ Invalid = true;
+ }
+
+ // Create a declaration for this anonymous struct/union.
+ NamedDecl *Anon = 0;
+ if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
+ Anon = FieldDecl::Create(Context, OwningClass, Record->getLocation(),
+ /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ /*BitWidth=*/0, /*Mutable=*/false);
+ Anon->setAccess(AS_public);
+ if (getLangOptions().CPlusPlus)
+ FieldCollector->Add(cast<FieldDecl>(Anon));
+ } else {
+ VarDecl::StorageClass SC;
+ switch (DS.getStorageClassSpec()) {
+ default: assert(0 && "Unknown storage class!");
+ case DeclSpec::SCS_unspecified: SC = VarDecl::None; break;
+ case DeclSpec::SCS_extern: SC = VarDecl::Extern; break;
+ case DeclSpec::SCS_static: SC = VarDecl::Static; break;
+ case DeclSpec::SCS_auto: SC = VarDecl::Auto; break;
+ case DeclSpec::SCS_register: SC = VarDecl::Register; break;
+ case DeclSpec::SCS_private_extern: SC = VarDecl::PrivateExtern; break;
+ case DeclSpec::SCS_mutable:
+ // mutable can only appear on non-static class members, so it's always
+ // an error here
+ Diag(Record->getLocation(), diag::err_mutable_nonmember);
+ Invalid = true;
+ SC = VarDecl::None;
+ break;
+ }
+
+ Anon = VarDecl::Create(Context, Owner, Record->getLocation(),
+ /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ SC, DS.getSourceRange().getBegin());
+ }
+ Anon->setImplicit();
+
+ // Add the anonymous struct/union object to the current
+ // context. We'll be referencing this object when we refer to one of
+ // its members.
+ Owner->addDecl(Context, Anon);
+
+ // Inject the members of the anonymous struct/union into the owning
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ if (InjectAnonymousStructOrUnionMembers(S, Owner, Record))
+ Invalid = true;
+
+ // Mark this as an anonymous struct/union type. Note that we do not
+ // do this until after we have already checked and injected the
+ // members of this anonymous struct/union type, because otherwise
+ // the members could be injected twice: once by DeclContext when it
+ // builds its lookup table, and once by
+ // InjectAnonymousStructOrUnionMembers.
+ Record->setAnonymousStructOrUnion(true);
+
+ if (Invalid)
+ Anon->setInvalidDecl();
+
+ return DeclPtrTy::make(Anon);
+}
+
+
+/// GetNameForDeclarator - Determine the full declaration name for the
+/// given Declarator.
+DeclarationName Sema::GetNameForDeclarator(Declarator &D) {
+ switch (D.getKind()) {
+ case Declarator::DK_Abstract:
+ assert(D.getIdentifier() == 0 && "abstract declarators have no name");
+ return DeclarationName();
+
+ case Declarator::DK_Normal:
+ assert (D.getIdentifier() != 0 && "normal declarators have an identifier");
+ return DeclarationName(D.getIdentifier());
+
+ case Declarator::DK_Constructor: {
+ QualType Ty = QualType::getFromOpaquePtr(D.getDeclaratorIdType());
+ Ty = Context.getCanonicalType(Ty);
+ return Context.DeclarationNames.getCXXConstructorName(Ty);
+ }
+
+ case Declarator::DK_Destructor: {
+ QualType Ty = QualType::getFromOpaquePtr(D.getDeclaratorIdType());
+ Ty = Context.getCanonicalType(Ty);
+ return Context.DeclarationNames.getCXXDestructorName(Ty);
+ }
+
+ case Declarator::DK_Conversion: {
+ // FIXME: We'd like to keep the non-canonical type for diagnostics!
+ QualType Ty = QualType::getFromOpaquePtr(D.getDeclaratorIdType());
+ Ty = Context.getCanonicalType(Ty);
+ return Context.DeclarationNames.getCXXConversionFunctionName(Ty);
+ }
+
+ case Declarator::DK_Operator:
+ assert(D.getIdentifier() == 0 && "operator names have no identifier");
+ return Context.DeclarationNames.getCXXOperatorName(
+ D.getOverloadedOperator());
+ }
+
+ assert(false && "Unknown name kind");
+ return DeclarationName();
+}
+
+/// isNearlyMatchingFunction - Determine whether the C++ functions
+/// Declaration and Definition are "nearly" matching. This heuristic
+/// is used to improve diagnostics in the case where an out-of-line
+/// function definition doesn't match any declaration within
+/// the class or namespace.
+static bool isNearlyMatchingFunction(ASTContext &Context,
+ FunctionDecl *Declaration,
+ FunctionDecl *Definition) {
+ if (Declaration->param_size() != Definition->param_size())
+ return false;
+ for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
+ QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
+ QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
+
+ DeclParamTy = Context.getCanonicalType(DeclParamTy.getNonReferenceType());
+ DefParamTy = Context.getCanonicalType(DefParamTy.getNonReferenceType());
+ if (DeclParamTy.getUnqualifiedType() != DefParamTy.getUnqualifiedType())
+ return false;
+ }
+
+ return true;
+}
+
+Sema::DeclPtrTy
+Sema::ActOnDeclarator(Scope *S, Declarator &D, bool IsFunctionDefinition) {
+ DeclarationName Name = GetNameForDeclarator(D);
+
+ // All of these full declarators require an identifier. If it doesn't have
+ // one, the ParsedFreeStandingDeclSpec action should be used.
+ if (!Name) {
+ if (!D.isInvalidType()) // Reject this if we think it is valid.
+ Diag(D.getDeclSpec().getSourceRange().getBegin(),
+ diag::err_declarator_need_ident)
+ << D.getDeclSpec().getSourceRange() << D.getSourceRange();
+ return DeclPtrTy();
+ }
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ DeclContext *DC;
+ NamedDecl *PrevDecl;
+ NamedDecl *New;
+
+ QualType R = GetTypeForDeclarator(D, S);
+
+ // See if this is a redefinition of a variable in the same scope.
+ if (D.getCXXScopeSpec().isInvalid()) {
+ DC = CurContext;
+ PrevDecl = 0;
+ D.setInvalidType();
+ } else if (!D.getCXXScopeSpec().isSet()) {
+ LookupNameKind NameKind = LookupOrdinaryName;
+
+ // If the declaration we're planning to build will be a function
+ // or object with linkage, then look for another declaration with
+ // linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ /* Do nothing*/;
+ else if (R->isFunctionType()) {
+ if (CurContext->isFunctionOrMethod())
+ NameKind = LookupRedeclarationWithLinkage;
+ } else if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern)
+ NameKind = LookupRedeclarationWithLinkage;
+
+ DC = CurContext;
+ PrevDecl = LookupName(S, Name, NameKind, true,
+ D.getDeclSpec().getStorageClassSpec() !=
+ DeclSpec::SCS_static,
+ D.getIdentifierLoc());
+ } else { // Something like "int foo::x;"
+ DC = computeDeclContext(D.getCXXScopeSpec());
+ // FIXME: RequireCompleteDeclContext(D.getCXXScopeSpec()); ?
+ PrevDecl = LookupQualifiedName(DC, Name, LookupOrdinaryName, true);
+
+ // C++ 7.3.1.2p2:
+ // Members (including explicit specializations of templates) of a named
+ // namespace can also be defined outside that namespace by explicit
+ // qualification of the name being defined, provided that the entity being
+ // defined was already declared in the namespace and the definition appears
+ // after the point of declaration in a namespace that encloses the
+ // declaration's namespace.
+ //
+ // Note that we only check the context at this point. We don't yet
+ // have enough information to make sure that PrevDecl is actually
+ // the declaration we want to match. For example, given:
+ //
+ // class X {
+ // void f();
+ // void f(float);
+ // };
+ //
+ // void X::f(int) { } // ill-formed
+ //
+ // In this case, PrevDecl will point to the overload set
+ // containing the two f's declared in X, but neither of them
+ // matches.
+
+ // First check whether we named the global scope.
+ if (isa<TranslationUnitDecl>(DC)) {
+ Diag(D.getIdentifierLoc(), diag::err_invalid_declarator_global_scope)
+ << Name << D.getCXXScopeSpec().getRange();
+ } else if (!CurContext->Encloses(DC)) {
+ // The qualifying scope doesn't enclose the original declaration.
+ // Emit diagnostic based on current scope.
+ SourceLocation L = D.getIdentifierLoc();
+ SourceRange R = D.getCXXScopeSpec().getRange();
+ if (isa<FunctionDecl>(CurContext))
+ Diag(L, diag::err_invalid_declarator_in_function) << Name << R;
+ else
+ Diag(L, diag::err_invalid_declarator_scope)
+ << Name << cast<NamedDecl>(DC) << R;
+ D.setInvalidType();
+ }
+ }
+
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ if (!D.isInvalidType())
+ if (DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl))
+ D.setInvalidType();
+
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+ // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
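+ //
+ // For example (illustrative only):
+ //   struct stat { /* ... */ };
+ //   int stat(const char *path, struct stat *buf);   // hides the tag 'stat'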
+ if (PrevDecl && PrevDecl->getIdentifierNamespace() == Decl::IDNS_Tag &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef)
+ PrevDecl = 0;
+
+ bool Redeclaration = false;
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ New = ActOnTypedefDeclarator(S, D, DC, R, PrevDecl, Redeclaration);
+ } else if (R->isFunctionType()) {
+ New = ActOnFunctionDeclarator(S, D, DC, R, PrevDecl,
+ IsFunctionDefinition, Redeclaration);
+ } else {
+ New = ActOnVariableDeclarator(S, D, DC, R, PrevDecl, Redeclaration);
+ }
+
+ if (New == 0)
+ return DeclPtrTy();
+
+ // If this has an identifier and is not an invalid redeclaration,
+ // add it to the scope stack.
+ if (Name && !(Redeclaration && New->isInvalidDecl()))
+ PushOnScopeChains(New, S);
+
+ return DeclPtrTy::make(New);
+}
+
+/// TryToFixInvalidVariablyModifiedType - Helper method to turn variable array
+/// types into constant array types in certain situations which would otherwise
+/// be errors (for GCC compatibility).
+static QualType TryToFixInvalidVariablyModifiedType(QualType T,
+ ASTContext &Context,
+ bool &SizeIsNegative) {
+ // This method tries to turn a variable array into a constant
+ // array even when the size isn't an ICE. This is necessary
+ // for compatibility with code that depends on gcc's buggy
+ // constant expression folding, like struct {char x[(int)(char*)2];}
+ SizeIsNegative = false;
+
+ if (const PointerType* PTy = dyn_cast<PointerType>(T)) {
+ QualType Pointee = PTy->getPointeeType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getPointerType(FixedType);
+ FixedType.setCVRQualifiers(T.getCVRQualifiers());
+ return FixedType;
+ }
+
+ const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
+ if (!VLATy)
+ return QualType();
+ // FIXME: We should probably handle this case
+ if (VLATy->getElementType()->isVariablyModifiedType())
+ return QualType();
+
+ Expr::EvalResult EvalResult;
+ if (!VLATy->getSizeExpr() ||
+ !VLATy->getSizeExpr()->Evaluate(EvalResult, Context) ||
+ !EvalResult.Val.isInt())
+ return QualType();
+
+ llvm::APSInt &Res = EvalResult.Val.getInt();
+ if (Res >= llvm::APSInt(Res.getBitWidth(), Res.isUnsigned()))
+ return Context.getConstantArrayType(VLATy->getElementType(),
+ Res, ArrayType::Normal, 0);
+
+ SizeIsNegative = true;
+ return QualType();
+}
+
+/// \brief Register the given locally-scoped external C declaration so
+/// that it can be found later for redeclarations
+void
+Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, NamedDecl *PrevDecl,
+ Scope *S) {
+ assert(ND->getLexicalDeclContext()->isFunctionOrMethod() &&
+ "Decl is not a locally-scoped decl!");
+ // Note that we have a locally-scoped external with this name.
+ LocallyScopedExternalDecls[ND->getDeclName()] = ND;
+
+ if (!PrevDecl)
+ return;
+
+ // If there was a previous declaration of this variable, it may be
+ // in our identifier chain. Update the identifier chain with the new
+ // declaration.
+ if (S && IdResolver.ReplaceDecl(PrevDecl, ND)) {
+ // The previous declaration was found on the identifier resolver
+ // chain, so remove it from its scope.
+ while (S && !S->isDeclScope(DeclPtrTy::make(PrevDecl)))
+ S = S->getParent();
+
+ if (S)
+ S->RemoveDecl(DeclPtrTy::make(PrevDecl));
+ }
+}
+
+/// \brief Diagnose function specifiers on a declaration of an identifier that
+/// does not identify a function.
+void Sema::DiagnoseFunctionSpecifiers(Declarator& D) {
+ // FIXME: We should probably indicate the identifier in question to avoid
+ // confusion for constructs like "inline int a(), b;"
+ if (D.getDeclSpec().isInlineSpecified())
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_inline_non_function);
+
+ if (D.getDeclSpec().isVirtualSpecified())
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+
+ if (D.getDeclSpec().isExplicitSpecified())
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_non_function);
+}
+
+NamedDecl*
+Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R, Decl* PrevDecl, bool &Redeclaration) {
+ // Typedef declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ // Pretend we didn't see the scope specifier.
+ DC = 0;
+ }
+
+ if (getLangOptions().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+
+ TypedefDecl *NewTD = ParseTypedefDecl(S, D, R);
+ if (!NewTD) return 0;
+
+ if (D.isInvalidType())
+ NewTD->setInvalidDecl();
+
+ // Handle attributes prior to checking for duplicates in MergeVarDecl
+ ProcessDeclAttributes(NewTD, D);
+ // Merge the decl with the existing one if appropriate. If the decl is
+ // in an outer scope, it isn't the same thing.
+ if (PrevDecl && isDeclInScope(PrevDecl, DC, S)) {
+ Redeclaration = true;
+ MergeTypeDefDecl(NewTD, PrevDecl);
+ }
+
+ // C99 6.7.7p2: If a typedef name specifies a variably modified type
+ // then it shall have block scope.
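+ //
+ // For example, at file scope (illustrative only):
+ //   extern int n;
+ //   typedef int VLA[n];   // error: variably modified typedef at file scope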
+ QualType T = NewTD->getUnderlyingType();
+ if (T->isVariablyModifiedType()) {
+ CurFunctionNeedsScopeChecking = true;
+
+ if (S->getFnParent() == 0) {
+ bool SizeIsNegative;
+ QualType FixedTy =
+ TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative);
+ if (!FixedTy.isNull()) {
+ Diag(D.getIdentifierLoc(), diag::warn_illegal_constant_array_size);
+ NewTD->setUnderlyingType(FixedTy);
+ } else {
+ if (SizeIsNegative)
+ Diag(D.getIdentifierLoc(), diag::err_typecheck_negative_array_size);
+ else if (T->isVariableArrayType())
+ Diag(D.getIdentifierLoc(), diag::err_vla_decl_in_file_scope);
+ else
+ Diag(D.getIdentifierLoc(), diag::err_vm_decl_in_file_scope);
+ NewTD->setInvalidDecl();
+ }
+ }
+ }
+ return NewTD;
+}
+
+/// \brief Determines whether the given declaration is an out-of-scope
+/// previous declaration.
+///
+/// This routine should be invoked when name lookup has found a
+/// previous declaration (PrevDecl) that is not in the scope where a
+/// new declaration by the same name is being introduced. If the new
+/// declaration occurs in a local scope, previous declarations with
+/// linkage may still be considered previous declarations (C99
+/// 6.2.2p4-5, C++ [basic.link]p6).
+///
+/// \param PrevDecl the previous declaration found by name
+/// lookup
+///
+/// \param DC the context in which the new declaration is being
+/// declared.
+///
+/// \returns true if PrevDecl is an out-of-scope previous declaration
+ /// for a new declaration with the same name.
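+ ///
+ /// An illustrative example:
+ ///
+ /// @code
+ /// int x;              // file-scope declaration with external linkage
+ /// void f() {
+ ///   extern int x;     // the file-scope 'x' is the out-of-scope previous
+ ///                     // declaration for this block-scope declaration
+ /// }
+ /// @endcode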
+static bool
+isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
+ ASTContext &Context) {
+ if (!PrevDecl)
+ return false;
+
+ // FIXME: PrevDecl could be an OverloadedFunctionDecl, in which
+ // case we need to check each of the overloaded functions.
+ if (!PrevDecl->hasLinkage())
+ return false;
+
+ if (Context.getLangOptions().CPlusPlus) {
+ // C++ [basic.link]p6:
+ // If there is a visible declaration of an entity with linkage
+ // having the same name and type, ignoring entities declared
+ // outside the innermost enclosing namespace scope, the block
+ // scope declaration declares that same entity and receives the
+ // linkage of the previous declaration.
+ DeclContext *OuterContext = DC->getLookupContext();
+ if (!OuterContext->isFunctionOrMethod())
+ // This rule only applies to block-scope declarations.
+ return false;
+ else {
+ DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
+ if (PrevOuterContext->isRecord())
+ // We found a member function: ignore it.
+ return false;
+ else {
+ // Find the innermost enclosing namespace for the new and
+ // previous declarations.
+ while (!OuterContext->isFileContext())
+ OuterContext = OuterContext->getParent();
+ while (!PrevOuterContext->isFileContext())
+ PrevOuterContext = PrevOuterContext->getParent();
+
+ // The previous declaration is in a different namespace, so it
+ // isn't the same function.
+ if (OuterContext->getPrimaryContext() !=
+ PrevOuterContext->getPrimaryContext())
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+NamedDecl*
+Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R,NamedDecl* PrevDecl,
+ bool &Redeclaration) {
+ DeclarationName Name = GetNameForDeclarator(D);
+
+ // Check that there are no default arguments (C++ only).
+ if (getLangOptions().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ VarDecl *NewVD;
+ VarDecl::StorageClass SC;
+ switch (D.getDeclSpec().getStorageClassSpec()) {
+ default: assert(0 && "Unknown storage class!");
+ case DeclSpec::SCS_unspecified: SC = VarDecl::None; break;
+ case DeclSpec::SCS_extern: SC = VarDecl::Extern; break;
+ case DeclSpec::SCS_static: SC = VarDecl::Static; break;
+ case DeclSpec::SCS_auto: SC = VarDecl::Auto; break;
+ case DeclSpec::SCS_register: SC = VarDecl::Register; break;
+ case DeclSpec::SCS_private_extern: SC = VarDecl::PrivateExtern; break;
+ case DeclSpec::SCS_mutable:
+ // mutable can only appear on non-static class members, so it's always
+ // an error here
+ Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember);
+ D.setInvalidType();
+ SC = VarDecl::None;
+ break;
+ }
+
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_variable_name)
+ << Name.getAsString();
+ return 0;
+ }
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (!DC->isRecord() && S->getFnParent() == 0) {
+ // C99 6.9p2: The storage-class specifiers auto and register shall not
+ // appear in the declaration specifiers in an external declaration.
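+ //
+ // For example, at file scope (illustrative only):
+ //   register int counter;   // error: illegal storage class specifier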
+ if (SC == VarDecl::Auto || SC == VarDecl::Register) {
+
+ // If this is a register variable with an asm label specified, then this
+ // is a GNU extension.
+ if (SC == VarDecl::Register && D.getAsmLabel())
+ Diag(D.getIdentifierLoc(), diag::err_unsupported_global_register);
+ else
+ Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope);
+ D.setInvalidType();
+ }
+ }
+ if (DC->isRecord() && !CurContext->isRecord()) {
+ // This is an out-of-line definition of a static data member.
+ if (SC == VarDecl::Static) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << CodeModificationHint::CreateRemoval(
+ SourceRange(D.getDeclSpec().getStorageClassSpecLoc()));
+ } else if (SC == VarDecl::None)
+ SC = VarDecl::Static;
+ }
+
+ NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(),
+ II, R, SC,
+ // FIXME: Move to DeclGroup...
+ D.getDeclSpec().getSourceRange().getBegin());
+
+ if (D.isInvalidType())
+ NewVD->setInvalidDecl();
+
+ if (D.getDeclSpec().isThreadSpecified()) {
+ if (NewVD->hasLocalStorage())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_non_global);
+ else if (!Context.Target.isTLSSupported())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_unsupported);
+ else
+ NewVD->setThreadSpecified(true);
+ }
+
+ // Set the lexical context. If the declarator has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ NewVD->setLexicalDeclContext(CurContext);
+
+ // Handle attributes prior to checking for duplicates in MergeVarDecl
+ ProcessDeclAttributes(NewVD, D);
+
+ // Handle GNU asm-label extension (encoded as an attribute).
+ if (Expr *E = (Expr*) D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ NewVD->addAttr(::new (Context) AsmLabelAttr(std::string(SE->getStrData(),
+ SE->getByteLength())));
+ }
+
+ // If name lookup finds a previous declaration that is not in the
+ // same scope as the new declaration, this may still be an
+ // acceptable redeclaration.
+ if (PrevDecl && !isDeclInScope(PrevDecl, DC, S) &&
+ !(NewVD->hasLinkage() &&
+ isOutOfScopePreviousDeclaration(PrevDecl, DC, Context)))
+ PrevDecl = 0;
+
+ // Merge the decl with the existing one if appropriate.
+ if (PrevDecl) {
+ if (isa<FieldDecl>(PrevDecl) && D.getCXXScopeSpec().isSet()) {
+ // The user tried to define a non-static data member
+ // out-of-line (C++ [dcl.meaning]p1).
+ Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
+ << D.getCXXScopeSpec().getRange();
+ PrevDecl = 0;
+ NewVD->setInvalidDecl();
+ }
+ } else if (D.getCXXScopeSpec().isSet()) {
+ // No previous declaration in the qualifying scope.
+ Diag(D.getIdentifierLoc(), diag::err_typecheck_no_member)
+ << Name << D.getCXXScopeSpec().getRange();
+ NewVD->setInvalidDecl();
+ }
+
+ CheckVariableDeclaration(NewVD, PrevDecl, Redeclaration);
+
+ // If this is a locally-scoped extern C variable, update the map of
+ // such variables.
+ if (CurContext->isFunctionOrMethod() && NewVD->isExternC(Context) &&
+ !NewVD->isInvalidDecl())
+ RegisterLocallyScopedExternCDecl(NewVD, PrevDecl, S);
+
+ return NewVD;
+}
+
+/// \brief Perform semantic checking on a newly-created variable
+/// declaration.
+///
+/// This routine performs all of the type-checking required for a
+/// variable declaration once it has been built. It is used both to
+/// check variables after they have been parsed and their declarators
+/// have been translated into a declaration, and to check variables
+/// that have been instantiated from a template.
+///
+/// Sets NewVD->isInvalidDecl() if an error was encountered.
+void Sema::CheckVariableDeclaration(VarDecl *NewVD, NamedDecl *PrevDecl,
+ bool &Redeclaration) {
+ // If the decl is already known invalid, don't check it.
+ if (NewVD->isInvalidDecl())
+ return;
+
+ QualType T = NewVD->getType();
+
+ if (T->isObjCInterfaceType()) {
+ Diag(NewVD->getLocation(), diag::err_statically_allocated_object);
+ return NewVD->setInvalidDecl();
+ }
+
+ // The variable can not have an abstract class type.
+ if (RequireNonAbstractType(NewVD->getLocation(), T,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ return NewVD->setInvalidDecl();
+
+ // Emit an error if an address space was applied to decl with local storage.
+ // This includes arrays of objects with address space qualifiers, but not
+ // automatic variables that point to other address spaces.
+ // ISO/IEC TR 18037 S5.1.2
+ if (NewVD->hasLocalStorage() && (T.getAddressSpace() != 0)) {
+ Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl);
+ return NewVD->setInvalidDecl();
+ }
+
+ if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
+ && !NewVD->hasAttr<BlocksAttr>())
+ Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
+
+ bool isVM = T->isVariablyModifiedType();
+ if (isVM || NewVD->hasAttr<CleanupAttr>())
+ CurFunctionNeedsScopeChecking = true;
+
+ if ((isVM && NewVD->hasLinkage()) ||
+ (T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
+ bool SizeIsNegative;
+ QualType FixedTy =
+ TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative);
+
+ if (FixedTy.isNull() && T->isVariableArrayType()) {
+ const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
+ // FIXME: This won't give the correct result for
+ // int a[10][n];
+ SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange();
+
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope)
+ << SizeRange;
+ else if (NewVD->getStorageClass() == VarDecl::Static)
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage)
+ << SizeRange;
+ else
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage)
+ << SizeRange;
+ return NewVD->setInvalidDecl();
+ }
+
+ if (FixedTy.isNull()) {
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
+ else
+ Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage);
+ return NewVD->setInvalidDecl();
+ }
+
+ Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
+ NewVD->setType(FixedTy);
+ }
+
+ if (!PrevDecl && NewVD->isExternC(Context)) {
+ // Since we did not find anything by this name and we're declaring
+ // an extern "C" variable, look for a non-visible extern "C"
+ // declaration with the same name.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = LocallyScopedExternalDecls.find(NewVD->getDeclName());
+ if (Pos != LocallyScopedExternalDecls.end())
+ PrevDecl = Pos->second;
+ }
+
+ if (T->isVoidType() && !NewVD->hasExternalStorage()) {
+ Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type)
+ << T;
+ return NewVD->setInvalidDecl();
+ }
+
+ if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_nonlocal);
+ return NewVD->setInvalidDecl();
+ }
+
+ if (isVM && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_vm);
+ return NewVD->setInvalidDecl();
+ }
+
+ if (PrevDecl) {
+ Redeclaration = true;
+ MergeVarDecl(NewVD, PrevDecl);
+ }
+}
+
+NamedDecl*
+Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ QualType R, NamedDecl* PrevDecl,
+ bool IsFunctionDefinition, bool &Redeclaration) {
+ assert(R.getTypePtr()->isFunctionType());
+
+ DeclarationName Name = GetNameForDeclarator(D);
+ FunctionDecl::StorageClass SC = FunctionDecl::None;
+ switch (D.getDeclSpec().getStorageClassSpec()) {
+ default: assert(0 && "Unknown storage class!");
+ case DeclSpec::SCS_auto:
+ case DeclSpec::SCS_register:
+ case DeclSpec::SCS_mutable:
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_typecheck_sclass_func);
+ D.setInvalidType();
+ break;
+ case DeclSpec::SCS_unspecified: SC = FunctionDecl::None; break;
+ case DeclSpec::SCS_extern: SC = FunctionDecl::Extern; break;
+ case DeclSpec::SCS_static: {
+ if (CurContext->getLookupContext()->isFunctionOrMethod()) {
+ // C99 6.7.1p5:
+ // The declaration of an identifier for a function that has
+ // block scope shall have no explicit storage-class specifier
+ // other than extern
+ // See also (C++ [dcl.stc]p4).
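+ //
+ // For example (illustrative only):
+ //   void f(void) {
+ //     static void g(void);   // error: 'static' not allowed at block scope
+ //   }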
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_block_func);
+ SC = FunctionDecl::None;
+ } else
+ SC = FunctionDecl::Static;
+ break;
+ }
+ case DeclSpec::SCS_private_extern: SC = FunctionDecl::PrivateExtern;break;
+ }
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+
+ // Check that the return type is not an abstract class type.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!DC->isRecord() &&
+ RequireNonAbstractType(D.getIdentifierLoc(),
+ R->getAsFunctionType()->getResultType(),
+ diag::err_abstract_type_in_decl,
+ AbstractReturnType))
+ D.setInvalidType();
+
+ // Do not allow returning an Objective-C interface by value.
+ if (R->getAsFunctionType()->getResultType()->isObjCInterfaceType()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0
+ << R->getAsFunctionType()->getResultType();
+ D.setInvalidType();
+ }
+
+ bool isVirtualOkay = false;
+ FunctionDecl *NewFD;
+ if (D.getKind() == Declarator::DK_Constructor) {
+ // This is a C++ constructor declaration.
+ assert(DC->isRecord() &&
+ "Constructors can only be declared in a member context");
+
+ R = CheckConstructorDeclarator(D, R, SC);
+
+ // Create the new declaration
+ NewFD = CXXConstructorDecl::Create(Context,
+ cast<CXXRecordDecl>(DC),
+ D.getIdentifierLoc(), Name, R,
+ isExplicit, isInline,
+ /*isImplicitlyDeclared=*/false);
+ } else if (D.getKind() == Declarator::DK_Destructor) {
+ // This is a C++ destructor declaration.
+ if (DC->isRecord()) {
+ R = CheckDestructorDeclarator(D, SC);
+
+ NewFD = CXXDestructorDecl::Create(Context,
+ cast<CXXRecordDecl>(DC),
+ D.getIdentifierLoc(), Name, R,
+ isInline,
+ /*isImplicitlyDeclared=*/false);
+
+ isVirtualOkay = true;
+ } else {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
+
+ // Create a FunctionDecl to satisfy the function definition parsing
+ // code path.
+ NewFD = FunctionDecl::Create(Context, DC, D.getIdentifierLoc(),
+ Name, R, SC, isInline,
+ /*hasPrototype=*/true,
+ // FIXME: Move to DeclGroup...
+ D.getDeclSpec().getSourceRange().getBegin());
+ D.setInvalidType();
+ }
+ } else if (D.getKind() == Declarator::DK_Conversion) {
+ if (!DC->isRecord()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_conv_function_not_member);
+ return 0;
+ }
+
+ CheckConversionDeclarator(D, R, SC);
+ NewFD = CXXConversionDecl::Create(Context, cast<CXXRecordDecl>(DC),
+ D.getIdentifierLoc(), Name, R,
+ isInline, isExplicit);
+
+ isVirtualOkay = true;
+ } else if (DC->isRecord()) {
+ // If the name of the function is the same as the name of the record, then
+ // this must be an invalid constructor that has a return type.
+ // (The parser checks for a return type and makes the declarator a
+ // constructor only if it has no return type.)
+ if (Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
+ Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ return 0;
+ }
+
+ // This is a C++ method declaration.
+ NewFD = CXXMethodDecl::Create(Context, cast<CXXRecordDecl>(DC),
+ D.getIdentifierLoc(), Name, R,
+ (SC == FunctionDecl::Static), isInline);
+
+ isVirtualOkay = (SC != FunctionDecl::Static);
+ } else {
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ // - we're in C++ (where every function has a prototype),
+ // - there is a prototype in the declarator, or
+ // - the type R of the function is some kind of typedef or other reference
+ // to a type name (which eventually refers to a function type).
+ bool HasPrototype =
+ getLangOptions().CPlusPlus ||
+ (D.getNumTypeObjects() && D.getTypeObject(0).Fun.hasPrototype) ||
+ (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
+
+ NewFD = FunctionDecl::Create(Context, DC,
+ D.getIdentifierLoc(),
+ Name, R, SC, isInline, HasPrototype,
+ // FIXME: Move to DeclGroup...
+ D.getDeclSpec().getSourceRange().getBegin());
+ }
+
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Set the lexical context. If the declarator has a C++
+ // scope specifier, the lexical context will be different
+ // from the semantic context.
+ NewFD->setLexicalDeclContext(CurContext);
+
+ // C++ [dcl.fct.spec]p5:
+ // The virtual specifier shall only be used in declarations of
+ // nonstatic class member functions that appear within a
+ // member-specification of a class declaration; see 10.3.
+ //
+ if (isVirtual && !NewFD->isInvalidDecl()) {
+ if (!isVirtualOkay) {
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+ } else if (!CurContext->isRecord()) {
+ // 'virtual' was specified outside of the class.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_out_of_class)
+ << CodeModificationHint::CreateRemoval(
+ SourceRange(D.getDeclSpec().getVirtualSpecLoc()));
+ } else {
+ // Okay: Add virtual to the method.
+ cast<CXXMethodDecl>(NewFD)->setVirtualAsWritten(true);
+ CXXRecordDecl *CurClass = cast<CXXRecordDecl>(DC);
+ CurClass->setAggregate(false);
+ CurClass->setPOD(false);
+ CurClass->setPolymorphic(true);
+ CurClass->setHasTrivialConstructor(false);
+ }
+ }
+
+ if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD)) {
+ // Look for virtual methods in base classes that this method might override.
+
+ BasePaths Paths;
+ if (LookupInBases(cast<CXXRecordDecl>(DC),
+ MemberLookupCriteria(NewMD), Paths)) {
+ for (BasePaths::decl_iterator I = Paths.found_decls_begin(),
+ E = Paths.found_decls_end(); I != E; ++I) {
+ if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
+ if (!CheckOverridingFunctionReturnType(NewMD, OldMD))
+ NewMD->addOverriddenMethod(OldMD);
+ }
+ }
+ }
+ }
+
+ if (SC == FunctionDecl::Static && isa<CXXMethodDecl>(NewFD) &&
+ !CurContext->isRecord()) {
+ // C++ [class.static]p1:
+ // A data or function member of a class may be declared static
+ // in a class definition, in which case it is a static member of
+ // the class.
+
+ // Complain about the 'static' specifier if it's on an out-of-line
+ // member function definition.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << CodeModificationHint::CreateRemoval(
+ SourceRange(D.getDeclSpec().getStorageClassSpecLoc()));
+ }
+
+ // Handle GNU asm-label extension (encoded as an attribute).
+ if (Expr *E = (Expr*) D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ NewFD->addAttr(::new (Context) AsmLabelAttr(std::string(SE->getStrData(),
+ SE->getByteLength())));
+ }
+
+ // Copy the parameter declarations from the declarator D to the function
+ // declaration NewFD, if they are available. First scavenge them into Params.
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+ if (D.getNumTypeObjects() > 0) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+
+ // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
+ // function that takes no arguments, not a function that takes a
+ // single void argument.
+ // We let through "const void" here because Sema::GetTypeForDeclarator
+ // already checks for that case.
+ if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ FTI.ArgInfo[0].Param &&
+ FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType()->isVoidType()) {
+ // Empty arg list, don't push any params.
+ ParmVarDecl *Param = FTI.ArgInfo[0].Param.getAs<ParmVarDecl>();
+
+ // In C++, the empty parameter-type-list must be spelled "void"; a
+ // typedef of void is not permitted.
+ if (getLangOptions().CPlusPlus &&
+ Param->getType().getUnqualifiedType() != Context.VoidTy)
+ Diag(Param->getLocation(), diag::err_param_typedef_of_void);
+ // FIXME: Leaks decl?
+ } else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) {
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i)
+ Params.push_back(FTI.ArgInfo[i].Param.getAs<ParmVarDecl>());
+ }
+
+ } else if (const FunctionProtoType *FT = R->getAsFunctionProtoType()) {
+ // When we're declaring a function with a typedef, typeof, etc as in the
+ // following example, we'll need to synthesize (unnamed)
+ // parameters for use in the declaration.
+ //
+ // @code
+ // typedef void fn(int);
+ // fn f;
+ // @endcode
+
+ // Synthesize a parameter for each argument type.
+ for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
+ AE = FT->arg_type_end(); AI != AE; ++AI) {
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, DC,
+ SourceLocation(), 0,
+ *AI, VarDecl::None, 0);
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+ } else {
+ assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 &&
+ "Should not need args for typedef of non-prototype fn");
+ }
+ // Finally, we know we have the right number of parameters, install them.
+ NewFD->setParams(Context, Params.data(), Params.size());
+
+ // If name lookup finds a previous declaration that is not in the
+ // same scope as the new declaration, this may still be an
+ // acceptable redeclaration.
+ if (PrevDecl && !isDeclInScope(PrevDecl, DC, S) &&
+ !(NewFD->hasLinkage() &&
+ isOutOfScopePreviousDeclaration(PrevDecl, DC, Context)))
+ PrevDecl = 0;
+
+ // Perform semantic checking on the function declaration.
+ bool OverloadableAttrRequired = false; // FIXME: HACK!
+ CheckFunctionDeclaration(NewFD, PrevDecl, Redeclaration,
+ /*FIXME:*/OverloadableAttrRequired);
+
+ if (D.getCXXScopeSpec().isSet() && !NewFD->isInvalidDecl()) {
+ // An out-of-line member function declaration must also be a
+ // definition (C++ [dcl.meaning]p1).
+ if (!IsFunctionDefinition) {
+ Diag(NewFD->getLocation(), diag::err_out_of_line_declaration)
+ << D.getCXXScopeSpec().getRange();
+ NewFD->setInvalidDecl();
+ } else if (!Redeclaration) {
+ // The user tried to provide an out-of-line definition for a
+ // function that is a member of a class or namespace, but there
+ // was no such member function declared (C++ [class.mfct]p2,
+ // C++ [namespace.memdef]p2). For example:
+ //
+ // class X {
+ // void f() const;
+ // };
+ //
+ // void X::f() { } // ill-formed
+ //
+ // Complain about this problem, and attempt to suggest close
+ // matches (e.g., those that differ only in cv-qualifiers and
+ // whether the parameter types are references).
+ Diag(D.getIdentifierLoc(), diag::err_member_def_does_not_match)
+ << cast<NamedDecl>(DC) << D.getCXXScopeSpec().getRange();
+ NewFD->setInvalidDecl();
+
+ LookupResult Prev = LookupQualifiedName(DC, Name, LookupOrdinaryName,
+ true);
+ assert(!Prev.isAmbiguous() &&
+ "Cannot have an ambiguity in previous-declaration lookup");
+ for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
+ Func != FuncEnd; ++Func) {
+ if (isa<FunctionDecl>(*Func) &&
+ isNearlyMatchingFunction(Context, cast<FunctionDecl>(*Func), NewFD))
+ Diag((*Func)->getLocation(), diag::note_member_def_close_match);
+ }
+
+ PrevDecl = 0;
+ }
+ }
+
+ // Handle attributes. We need to have merged decls when handling attributes
+ // (for example to check for conflicts, etc).
+ // FIXME: This needs to happen before we merge declarations. Then,
+ // let attribute merging cope with attribute conflicts.
+ ProcessDeclAttributes(NewFD, D);
+ AddKnownFunctionAttributes(NewFD);
+
+ if (OverloadableAttrRequired && !NewFD->getAttr<OverloadableAttr>()) {
+ // If a function name is overloadable in C, then every function
+ // with that name must be marked "overloadable".
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
+ << Redeclaration << NewFD;
+ if (PrevDecl)
+ Diag(PrevDecl->getLocation(),
+ diag::note_attribute_overloadable_prev_overload);
+ NewFD->addAttr(::new (Context) OverloadableAttr());
+ }
+
+ // If this is a locally-scoped extern C function, update the
+ // map of such names.
+ if (CurContext->isFunctionOrMethod() && NewFD->isExternC(Context)
+ && !NewFD->isInvalidDecl())
+ RegisterLocallyScopedExternCDecl(NewFD, PrevDecl, S);
+
+ return NewFD;
+}
+
+/// \brief Perform semantic checking of a new function declaration.
+///
+/// Performs semantic analysis of the new function declaration
+/// NewFD. This routine performs all semantic checking that does not
+/// require the actual declarator involved in the declaration, and is
+/// used both for the declaration of functions as they are parsed
+/// (called via ActOnDeclarator) and for the declaration of functions
+/// that have been instantiated via C++ template instantiation (called
+/// via InstantiateDecl).
+///
+/// This sets NewFD->isInvalidDecl() to true if there was an error.
+void Sema::CheckFunctionDeclaration(FunctionDecl *NewFD, NamedDecl *&PrevDecl,
+ bool &Redeclaration,
+ bool &OverloadableAttrRequired) {
+ // If NewFD is already known erroneous, don't do any of this checking.
+ if (NewFD->isInvalidDecl())
+ return;
+
+ if (NewFD->getResultType()->isVariablyModifiedType()) {
+ // Functions returning a variably modified type violate C99 6.7.5.2p2
+ // because all functions have linkage.
+ Diag(NewFD->getLocation(), diag::err_vm_func_decl);
+ return NewFD->setInvalidDecl();
+ }
+
+ // Semantic checking for this function declaration (in isolation).
+ if (getLangOptions().CPlusPlus) {
+ // C++-specific checks.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
+ CheckConstructor(Constructor);
+ } else if (isa<CXXDestructorDecl>(NewFD)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(NewFD->getParent());
+ Record->setUserDeclaredDestructor(true);
+ // C++ [class]p4: A POD-struct is an aggregate class that has [...] no
+ // user-defined destructor.
+ Record->setPOD(false);
+
+ // C++ [class.dtor]p3: A destructor is trivial if it is an implicitly-
+ // declared destructor.
+ Record->setHasTrivialDestructor(false);
+ } else if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(NewFD))
+ ActOnConversionDeclarator(Conversion);
+
+ // Extra checking for C++ overloaded operators (C++ [over.oper]).
+ if (NewFD->isOverloadedOperator() &&
+ CheckOverloadedOperatorDeclaration(NewFD))
+ return NewFD->setInvalidDecl();
+ }
+
+ // C99 6.7.4p6:
+ // [... ] For a function with external linkage, the following
+ // restrictions apply: [...] If all of the file scope declarations
+ // for a function in a translation unit include the inline
+ // function specifier without extern, then the definition in that
+ // translation unit is an inline definition. An inline definition
+ // does not provide an external definition for the function, and
+ // does not forbid an external definition in another translation
+ // unit.
+ //
+ // Here we determine whether this function, in isolation, would be a
+ // C99 inline definition. MergeCompatibleFunctionDecls looks at
+ // previous declarations.
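+ //
+ // For example, at file scope in C99 (illustrative only):
+ //   inline int twice(int x) { return 2 * x; }   // inline definition: no
+ //                                               // external definition emitted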
+ if (NewFD->isInline() && getLangOptions().C99 &&
+ NewFD->getStorageClass() == FunctionDecl::None &&
+ NewFD->getDeclContext()->getLookupContext()->isTranslationUnit())
+ NewFD->setC99InlineDefinition(true);
+
+ // Check for a previous declaration of this name.
+ if (!PrevDecl && NewFD->isExternC(Context)) {
+ // Since we did not find anything by this name and we're declaring
+ // an extern "C" function, look for a non-visible extern "C"
+ // declaration with the same name.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = LocallyScopedExternalDecls.find(NewFD->getDeclName());
+ if (Pos != LocallyScopedExternalDecls.end())
+ PrevDecl = Pos->second;
+ }
+
+ // Merge or overload the declaration with an existing declaration of
+ // the same name, if appropriate.
+ if (PrevDecl) {
+ // Determine whether NewFD is an overload of PrevDecl or
+ // a declaration that requires merging. If it's an overload,
+ // there's no more work to do here; we'll just add the new
+ // function to the scope.
+ OverloadedFunctionDecl::function_iterator MatchedDecl;
+
+ if (!getLangOptions().CPlusPlus &&
+ AllowOverloadingOfFunction(PrevDecl, Context)) {
+ OverloadableAttrRequired = true;
+
+ // Functions marked "overloadable" must have a prototype (that
+ // we can't get through declaration merging).
+ if (!NewFD->getType()->getAsFunctionProtoType()) {
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_no_prototype)
+ << NewFD;
+ Redeclaration = true;
+
+ // Turn this into a variadic function with no parameters.
+ QualType R = Context.getFunctionType(
+ NewFD->getType()->getAsFunctionType()->getResultType(),
+ 0, 0, true, 0);
+ NewFD->setType(R);
+ return NewFD->setInvalidDecl();
+ }
+ }
+
+ if (PrevDecl &&
+ (!AllowOverloadingOfFunction(PrevDecl, Context) ||
+ !IsOverload(NewFD, PrevDecl, MatchedDecl))) {
+ Redeclaration = true;
+ Decl *OldDecl = PrevDecl;
+
+ // If PrevDecl was an overloaded function, extract the
+ // FunctionDecl that matched.
+ if (isa<OverloadedFunctionDecl>(PrevDecl))
+ OldDecl = *MatchedDecl;
+
+ // NewFD and OldDecl represent declarations that need to be
+ // merged.
+ if (MergeFunctionDecl(NewFD, OldDecl))
+ return NewFD->setInvalidDecl();
+
+ NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
+ }
+ }
+
+ // In C++, check default arguments now that we have merged decls. Unless
+ // the lexical context is the class, because in this case this is done
+ // during delayed parsing anyway.
+ if (getLangOptions().CPlusPlus && !CurContext->isRecord())
+ CheckCXXDefaultArguments(NewFD);
+}
+
+bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
+ // FIXME: Need strict checking. In C89, we need to check for
+ // any assignment, increment, decrement, function-calls, or
+ // commas outside of a sizeof. In C99, it's the same list,
+ // except that the aforementioned are allowed in unevaluated
+ // expressions. Everything else falls under the
+ // "may accept other forms of constant expressions" exception.
+ // (We never end up here for C++, so the constant expression
+ // rules there don't matter.)
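+ //
+ // For example, at file scope in C (illustrative only):
+ //   int n = 5;        // OK: constant initializer
+ //   int m = n + 1;    // error: initializer element is not constant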
+ if (Init->isConstantInitializer(Context))
+ return false;
+ Diag(Init->getExprLoc(), diag::err_init_element_not_constant)
+ << Init->getSourceRange();
+ return true;
+}
+
+void Sema::AddInitializerToDecl(DeclPtrTy dcl, FullExprArg init) {
+ AddInitializerToDecl(dcl, init.release(), /*DirectInit=*/false);
+}
+
+/// AddInitializerToDecl - Adds the initializer Init to the
+/// declaration dcl. If DirectInit is true, this is C++ direct
+/// initialization rather than copy initialization.
+void Sema::AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit) {
+ Decl *RealDecl = dcl.getAs<Decl>();
+ // If there is no declaration, there was an error parsing it. Just ignore
+ // the initializer.
+ if (RealDecl == 0)
+ return;
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
+ // With declarators parsed the way they are, the parser cannot
+ // distinguish between a normal initializer and a pure-specifier.
+ // Thus this grotesque test.
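+ //
+ // For example (illustrative only):
+ //   struct B { virtual void f() = 0; };   // the '= 0' arrives here as an
+ //                                         // initializer for 'f'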
+ IntegerLiteral *IL;
+ Expr *Init = static_cast<Expr *>(init.get());
+ if ((IL = dyn_cast<IntegerLiteral>(Init)) && IL->getValue() == 0 &&
+ Context.getCanonicalType(IL->getType()) == Context.IntTy) {
+ if (Method->isVirtualAsWritten()) {
+ Method->setPure();
+
+ // A class is abstract if at least one function is pure virtual.
+ cast<CXXRecordDecl>(CurContext)->setAbstract(true);
+ } else if (!Method->isInvalidDecl()) {
+ Diag(Method->getLocation(), diag::err_non_virtual_pure)
+ << Method->getDeclName() << Init->getSourceRange();
+ Method->setInvalidDecl();
+ }
+ } else {
+ Diag(Method->getLocation(), diag::err_member_function_initialization)
+ << Method->getDeclName() << Init->getSourceRange();
+ Method->setInvalidDecl();
+ }
+ return;
+ }
+
+ VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
+ if (!VDecl) {
+ if (getLangOptions().CPlusPlus &&
+ RealDecl->getLexicalDeclContext()->isRecord() &&
+ isa<NamedDecl>(RealDecl))
+ Diag(RealDecl->getLocation(), diag::err_member_initialization)
+ << cast<NamedDecl>(RealDecl)->getDeclName();
+ else
+ Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ if (!VDecl->getType()->isArrayType() &&
+ RequireCompleteType(VDecl->getLocation(), VDecl->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ const VarDecl *Def = 0;
+ if (VDecl->getDefinition(Def)) {
+ Diag(VDecl->getLocation(), diag::err_redefinition)
+ << VDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ // Take ownership of the expression, now that we're sure we have somewhere
+ // to put it.
+ Expr *Init = init.takeAs<Expr>();
+ assert(Init && "missing initializer");
+
+ // Get the decl's type and save a reference for later, since
+ // CheckInitializerTypes may change it.
+ QualType DclT = VDecl->getType(), SavT = DclT;
+ if (VDecl->isBlockVarDecl()) {
+ if (VDecl->hasExternalStorage()) { // C99 6.7.8p5
+ Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
+ VDecl->setInvalidDecl();
+ } else if (!VDecl->isInvalidDecl()) {
+ if (CheckInitializerTypes(Init, DclT, VDecl->getLocation(),
+ VDecl->getDeclName(), DirectInit))
+ VDecl->setInvalidDecl();
+
+ // C++ 3.6.2p2 allows dynamic initialization of objects with static storage duration.
+ // Don't check invalid declarations to avoid emitting useless diagnostics.
+ if (!getLangOptions().CPlusPlus && !VDecl->isInvalidDecl()) {
+ if (VDecl->getStorageClass() == VarDecl::Static) // C99 6.7.8p4.
+ CheckForConstantInitializer(Init, DclT);
+ }
+ }
+ } else if (VDecl->isStaticDataMember() &&
+ VDecl->getLexicalDeclContext()->isRecord()) {
+ // This is an in-class initialization for a static data member, e.g.,
+ //
+ // struct S {
+ // static const int value = 17;
+ // };
+
+ // Attach the initializer
+ VDecl->setInit(Context, Init);
+
+ // C++ [class.mem]p4:
+ // A member-declarator can contain a constant-initializer only
+ // if it declares a static member (9.4) of const integral or
+ // const enumeration type, see 9.4.2.
+ QualType T = VDecl->getType();
+ if (!T->isDependentType() &&
+ (!Context.getCanonicalType(T).isConstQualified() ||
+ !T->isIntegralType())) {
+ Diag(VDecl->getLocation(), diag::err_member_initialization)
+ << VDecl->getDeclName() << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ } else {
+ // C++ [class.static.data]p4:
+ // If a static data member is of const integral or const
+ // enumeration type, its declaration in the class definition
+ // can specify a constant-initializer which shall be an
+ // integral constant expression (5.19).
+ if (!Init->isTypeDependent() &&
+ !Init->getType()->isIntegralType()) {
+ // We have a non-dependent type that is not an integral or enumeration type.
+ Diag(Init->getSourceRange().getBegin(),
+ diag::err_in_class_initializer_non_integral_type)
+ << Init->getType() << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ } else if (!Init->isTypeDependent() && !Init->isValueDependent()) {
+ // Check whether the expression is a constant expression.
+ llvm::APSInt Value;
+ SourceLocation Loc;
+ if (!Init->isIntegerConstantExpr(Value, Context, &Loc)) {
+ Diag(Loc, diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ } else if (!VDecl->getType()->isDependentType())
+ ImpCastExprToType(Init, VDecl->getType());
+ }
+ }
+ } else if (VDecl->isFileVarDecl()) {
+ if (VDecl->getStorageClass() == VarDecl::Extern)
+ Diag(VDecl->getLocation(), diag::warn_extern_init);
+ if (!VDecl->isInvalidDecl())
+ if (CheckInitializerTypes(Init, DclT, VDecl->getLocation(),
+ VDecl->getDeclName(), DirectInit))
+ VDecl->setInvalidDecl();
+
+ // C++ 3.6.2p2: allow dynamic initialization of objects with static storage duration.
+ // Don't check invalid declarations to avoid emitting useless diagnostics.
+ if (!getLangOptions().CPlusPlus && !VDecl->isInvalidDecl()) {
+ // C99 6.7.8p4. All file scoped initializers need to be constant.
+ CheckForConstantInitializer(Init, DclT);
+ }
+ }
+ // If the type changed, it means we had an incomplete type that was
+ // completed by the initializer. For example:
+ // int ary[] = { 1, 3, 5 };
+ // "ary" transitions from a VariableArrayType to a ConstantArrayType.
+ if (!VDecl->isInvalidDecl() && (DclT != SavT)) {
+ VDecl->setType(DclT);
+ Init->setType(DclT);
+ }
+
+ // Attach the initializer to the decl.
+ VDecl->setInit(Context, Init);
+
+ // If the previous declaration of VDecl was a tentative definition,
+ // remove it from the set of tentative definitions.
+ if (VDecl->getPreviousDeclaration() &&
+ VDecl->getPreviousDeclaration()->isTentativeDefinition(Context)) {
+ llvm::DenseMap<DeclarationName, VarDecl *>::iterator Pos
+ = TentativeDefinitions.find(VDecl->getDeclName());
+ assert(Pos != TentativeDefinitions.end() &&
+ "Unrecorded tentative definition?");
+ TentativeDefinitions.erase(Pos);
+ }
+
+ return;
+}
+
+void Sema::ActOnUninitializedDecl(DeclPtrTy dcl) {
+ Decl *RealDecl = dcl.getAs<Decl>();
+
+ // If there is no declaration, there was an error parsing it. Just ignore it.
+ if (RealDecl == 0)
+ return;
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
+ QualType Type = Var->getType();
+
+ // Record tentative definitions.
+ if (Var->isTentativeDefinition(Context))
+ TentativeDefinitions[Var->getDeclName()] = Var;
+
+ // C++ [dcl.init.ref]p3:
+ // The initializer can be omitted for a reference only in a
+ // parameter declaration (8.3.5), in the declaration of a
+ // function return type, in the declaration of a class member
+ // within its class declaration (9.2), and where the extern
+ // specifier is explicitly used.
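+ // For example (illustrative): "int &r;" at block scope is ill-formed,
+ // while "extern int &r;" is accepted because of the extern specifier.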
+ if (Type->isReferenceType() && !Var->hasExternalStorage()) {
+ Diag(Var->getLocation(), diag::err_reference_var_requires_init)
+ << Var->getDeclName()
+ << SourceRange(Var->getLocation(), Var->getLocation());
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // C++ [dcl.init]p9:
+ //
+ // If no initializer is specified for an object, and the object
+ // is of (possibly cv-qualified) non-POD class type (or array
+ // thereof), the object shall be default-initialized; if the
+ // object is of const-qualified type, the underlying class type
+ // shall have a user-declared default constructor.
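+ // For example (illustrative): given "struct X { X(); };", the declaration
+ // "X x;" default-initializes x by calling X::X().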
+ if (getLangOptions().CPlusPlus) {
+ QualType InitType = Type;
+ if (const ArrayType *Array = Context.getAsArrayType(Type))
+ InitType = Array->getElementType();
+ if ((!Var->hasExternalStorage() && !Var->isExternC(Context)) &&
+ InitType->isRecordType() && !InitType->isDependentType()) {
+ CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(InitType->getAsRecordType()->getDecl());
+ CXXConstructorDecl *Constructor = 0;
+ if (!RequireCompleteType(Var->getLocation(), InitType,
+ diag::err_invalid_incomplete_type_use))
+ Constructor
+ = PerformInitializationByConstructor(InitType, 0, 0,
+ Var->getLocation(),
+ SourceRange(Var->getLocation(),
+ Var->getLocation()),
+ Var->getDeclName(),
+ IK_Default);
+ if (!Constructor)
+ Var->setInvalidDecl();
+ else if (!RD->hasTrivialConstructor())
+ InitializeVarWithConstructor(Var, Constructor, InitType, 0, 0);
+ }
+ }
+
+#if 0
+ // FIXME: Temporarily disabled because we are not properly parsing
+ // linkage specifications on declarations, e.g.,
+ //
+ // extern "C" const CGPoint CGPointerZero;
+ //
+ // C++ [dcl.init]p9:
+ //
+ // If no initializer is specified for an object, and the
+ // object is of (possibly cv-qualified) non-POD class type (or
+ // array thereof), the object shall be default-initialized; if
+ // the object is of const-qualified type, the underlying class
+ // type shall have a user-declared default
+ // constructor. Otherwise, if no initializer is specified for
+ // an object, the object and its subobjects, if any, have an
+ // indeterminate initial value; if the object or any of its
+ // subobjects are of const-qualified type, the program is
+ // ill-formed.
+ //
+ // This isn't technically an error in C, so we don't diagnose it.
+ //
+ // FIXME: Actually perform the POD/user-defined default
+ // constructor check.
+ if (getLangOptions().CPlusPlus &&
+ Context.getCanonicalType(Type).isConstQualified() &&
+ !Var->hasExternalStorage())
+ Diag(Var->getLocation(), diag::err_const_var_requires_init)
+ << Var->getName()
+ << SourceRange(Var->getLocation(), Var->getLocation());
+#endif
+ }
+}
+
+Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
+ DeclPtrTy *Group,
+ unsigned NumDecls) {
+ llvm::SmallVector<Decl*, 8> Decls;
+
+ if (DS.isTypeSpecOwned())
+ Decls.push_back((Decl*)DS.getTypeRep());
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ if (Decl *D = Group[i].getAs<Decl>())
+ Decls.push_back(D);
+
+ // Perform semantic analysis that depends on having fully processed both
+ // the declarator and initializer.
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i) {
+ VarDecl *IDecl = dyn_cast<VarDecl>(Decls[i]);
+ if (!IDecl)
+ continue;
+ QualType T = IDecl->getType();
+
+ // Block scope. C99 6.7p7: If an identifier for an object is declared with
+ // no linkage (C99 6.2.2p6), the type for the object shall be complete...
+ if (IDecl->isBlockVarDecl() && !IDecl->hasExternalStorage()) {
+ if (!IDecl->isInvalidDecl() &&
+ RequireCompleteType(IDecl->getLocation(), T,
+ diag::err_typecheck_decl_incomplete_type))
+ IDecl->setInvalidDecl();
+ }
+ // File scope. C99 6.9.2p2: A declaration of an identifier for an
+ // object that has file scope without an initializer, and without a
+ // storage-class specifier or with the storage-class specifier "static",
+ // constitutes a tentative definition. Note: A tentative definition with
+ // external linkage is valid (C99 6.2.2p5).
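+ // For example (illustrative): "int i;" and "static int j;" at file scope
+ // are tentative definitions, while "int k = 0;" is a definition.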
+ if (IDecl->isTentativeDefinition(Context)) {
+ QualType CheckType = T;
+ unsigned DiagID = diag::err_typecheck_decl_incomplete_type;
+
+ const IncompleteArrayType *ArrayT = Context.getAsIncompleteArrayType(T);
+ if (ArrayT) {
+ CheckType = ArrayT->getElementType();
+ DiagID = diag::err_illegal_decl_array_incomplete_type;
+ }
+
+ if (IDecl->isInvalidDecl()) {
+ // Do nothing with invalid declarations
+ } else if ((ArrayT || IDecl->getStorageClass() == VarDecl::Static) &&
+ RequireCompleteType(IDecl->getLocation(), CheckType, DiagID)) {
+ // C99 6.9.2p3: If the declaration of an identifier for an object is
+ // a tentative definition and has internal linkage (C99 6.2.2p3), the
+ // declared type shall not be an incomplete type.
+ IDecl->setInvalidDecl();
+ }
+ }
+ }
+ return DeclGroupPtrTy::make(DeclGroupRef::Create(Context,
+ Decls.data(), Decls.size()));
+}
+
+
+/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
+/// to introduce parameters into function prototype scope.
+Sema::DeclPtrTy
+Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ // Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
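+ // For example (illustrative): "void f(register int x);" is accepted, while
+ // "void f(static int x);" is diagnosed below.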
+ VarDecl::StorageClass StorageClass = VarDecl::None;
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
+ StorageClass = VarDecl::Register;
+ } else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+
+ DiagnoseFunctionSpecifiers(D);
+
+ // Check that there are no default arguments inside the type of this
+ // parameter (C++ only).
+ if (getLangOptions().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ TagDecl *OwnedDecl = 0;
+ QualType parmDeclType = GetTypeForDeclarator(D, S, /*Skip=*/0, &OwnedDecl);
+
+ if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
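+ // For example (illustrative): "void f(struct S { int x; } s);" is
+ // ill-formed in C++ because S is defined in a parameter type.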
+ Diag(OwnedDecl->getLocation(), diag::err_type_defined_in_param_type)
+ << Context.getTypeDeclType(OwnedDecl);
+ }
+
+ // TODO: CHECK FOR CONFLICTS, multiple decls with same name in one scope.
+ // Can this happen for params? We already checked that they don't conflict
+ // among each other. Here they can only shadow globals, which is ok.
+ IdentifierInfo *II = D.getIdentifier();
+ if (II) {
+ if (NamedDecl *PrevDecl = LookupName(S, II, LookupOrdinaryName)) {
+ if (PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ } else if (S->isDeclScope(DeclPtrTy::make(PrevDecl))) {
+ Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
+
+ // Recover by removing the name
+ II = 0;
+ D.SetIdentifier(0, D.getIdentifierLoc());
+ }
+ }
+ }
+
+ // Parameters cannot be of abstract class type.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!CurContext->isRecord() &&
+ RequireNonAbstractType(D.getIdentifierLoc(), parmDeclType,
+ diag::err_abstract_type_in_decl,
+ AbstractParamType))
+ D.setInvalidType(true);
+
+ QualType T = adjustParameterType(parmDeclType);
+
+ ParmVarDecl *New;
+ if (T == parmDeclType) // parameter type did not need adjustment
+ New = ParmVarDecl::Create(Context, CurContext,
+ D.getIdentifierLoc(), II,
+ parmDeclType, StorageClass,
+ 0);
+ else // keep track of both the adjusted and unadjusted types
+ New = OriginalParmVarDecl::Create(Context, CurContext,
+ D.getIdentifierLoc(), II, T,
+ parmDeclType, StorageClass, 0);
+
+ if (D.isInvalidType())
+ New->setInvalidDecl();
+
+ // Parameter declarators cannot be interface types. All ObjC objects are
+ // passed by reference.
+ if (T->isObjCInterfaceType()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 1 << T;
+ New->setInvalidDecl();
+ }
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
+ << D.getCXXScopeSpec().getRange();
+ New->setInvalidDecl();
+ }
+
+ // Add the parameter declaration into this scope.
+ S->AddDecl(DeclPtrTy::make(New));
+ if (II)
+ IdResolver.AddDecl(New);
+
+ ProcessDeclAttributes(New, D);
+
+ if (New->hasAttr<BlocksAttr>()) {
+ Diag(New->getLocation(), diag::err_block_on_nonlocal);
+ }
+ return DeclPtrTy::make(New);
+}
+
+void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
+ SourceLocation LocAfterDecls) {
+ assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
+ "Not a function declarator!");
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+
+ // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
+ // for a K&R function.
+ if (!FTI.hasPrototype) {
+ for (int i = FTI.NumArgs; i != 0; /* decrement in loop */) {
+ --i;
+ if (FTI.ArgInfo[i].Param == 0) {
+ std::string Code = " int ";
+ Code += FTI.ArgInfo[i].Ident->getName();
+ Code += ";\n";
+ Diag(FTI.ArgInfo[i].IdentLoc, diag::ext_param_not_declared)
+ << FTI.ArgInfo[i].Ident
+ << CodeModificationHint::CreateInsertion(LocAfterDecls, Code);
+
+ // Implicitly declare the argument as type 'int' for lack of a better
+ // type.
+ DeclSpec DS;
+ const char* PrevSpec; // unused
+ DS.SetTypeSpecType(DeclSpec::TST_int, FTI.ArgInfo[i].IdentLoc,
+ PrevSpec);
+ Declarator ParamD(DS, Declarator::KNRTypeListContext);
+ ParamD.SetIdentifier(FTI.ArgInfo[i].Ident, FTI.ArgInfo[i].IdentLoc);
+ FTI.ArgInfo[i].Param = ActOnParamDeclarator(S, ParamD);
+ }
+ }
+ }
+}
+
+Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope,
+ Declarator &D) {
+ assert(getCurFunctionDecl() == 0 && "Function parsing confused");
+ assert(D.getTypeObject(0).Kind == DeclaratorChunk::Function &&
+ "Not a function declarator!");
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+
+ if (FTI.hasPrototype) {
+ // FIXME: Diagnose arguments without names in C.
+ }
+
+ Scope *ParentScope = FnBodyScope->getParent();
+
+ DeclPtrTy DP = ActOnDeclarator(ParentScope, D, /*IsFunctionDefinition=*/true);
+ return ActOnStartOfFunctionDef(FnBodyScope, DP);
+}
+
+Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) {
+ FunctionDecl *FD = cast<FunctionDecl>(D.getAs<Decl>());
+
+ CurFunctionNeedsScopeChecking = false;
+
+ // See if this is a redefinition.
+ const FunctionDecl *Definition;
+ if (FD->getBody(Context, Definition)) {
+ Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
+ Diag(Definition->getLocation(), diag::note_previous_definition);
+ }
+
+ // Builtin functions cannot be defined.
+ if (unsigned BuiltinID = FD->getBuiltinID(Context)) {
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
+ Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
+ FD->setInvalidDecl();
+ }
+ }
+
+ // The return type of a function definition must be complete
+ // (C99 6.9.1p3, C++ [dcl.fct]p6).
+ QualType ResultType = FD->getResultType();
+ if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
+ !FD->isInvalidDecl() &&
+ RequireCompleteType(FD->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result))
+ FD->setInvalidDecl();
+
+ // GNU warning -Wmissing-prototypes:
+ // Warn if a global function is defined without a previous
+ // prototype declaration. This warning is issued even if the
+ // definition itself provides a prototype. The aim is to detect
+ // global functions that fail to be declared in header files.
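+ // For example (illustrative): defining "int f(void) { return 0; }" at file
+ // scope with no prior declaration of f triggers this warning.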
+ if (!FD->isInvalidDecl() && FD->isGlobal() && !isa<CXXMethodDecl>(FD) &&
+ !FD->isMain()) {
+ bool MissingPrototype = true;
+ for (const FunctionDecl *Prev = FD->getPreviousDeclaration();
+ Prev; Prev = Prev->getPreviousDeclaration()) {
+ // Ignore any declarations that occur in function or method
+ // scope, because they aren't visible from the header.
+ if (Prev->getDeclContext()->isFunctionOrMethod())
+ continue;
+
+ MissingPrototype = !Prev->getType()->isFunctionProtoType();
+ break;
+ }
+
+ if (MissingPrototype)
+ Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
+ }
+
+ if (FnBodyScope)
+ PushDeclContext(FnBodyScope, FD);
+
+ // Check the validity of our function parameters
+ CheckParmsForFunctionDef(FD);
+
+ // Introduce our parameters into the function scope
+ for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ Param->setOwningFunction(FD);
+
+ // If this has an identifier, add it to the scope stack.
+ if (Param->getIdentifier() && FnBodyScope)
+ PushOnScopeChains(Param, FnBodyScope);
+ }
+
+ // Checking attributes of current function definition
+ // dllimport attribute.
+ if (FD->getAttr<DLLImportAttr>() && (!FD->getAttr<DLLExportAttr>())) {
+ // dllimport attribute cannot be applied to definition.
+ if (!(FD->getAttr<DLLImportAttr>())->isInherited()) {
+ Diag(FD->getLocation(),
+ diag::err_attribute_can_be_applied_only_to_symbol_declaration)
+ << "dllimport";
+ FD->setInvalidDecl();
+ return DeclPtrTy::make(FD);
+ } else {
+ // If a symbol previously declared dllimport is later defined, the
+ // attribute is ignored in subsequent references, and a warning is
+ // emitted.
+ Diag(FD->getLocation(),
+ diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
+ << FD->getNameAsCString() << "dllimport";
+ }
+ }
+ return DeclPtrTy::make(FD);
+}
+
+Sema::DeclPtrTy Sema::ActOnFinishFunctionBody(DeclPtrTy D, StmtArg BodyArg) {
+ return ActOnFinishFunctionBody(D, move(BodyArg), false);
+}
+
+Sema::DeclPtrTy Sema::ActOnFinishFunctionBody(DeclPtrTy D, StmtArg BodyArg,
+ bool IsInstantiation) {
+ Decl *dcl = D.getAs<Decl>();
+ Stmt *Body = BodyArg.takeAs<Stmt>();
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(dcl)) {
+ FD->setBody(Body);
+ assert(FD == getCurFunctionDecl() && "Function parsing confused");
+ } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
+ assert(MD == getCurMethodDecl() && "Method parsing confused");
+ MD->setBody(Body);
+ } else {
+ Body->Destroy(Context);
+ return DeclPtrTy();
+ }
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ // Verify and clean out per-function state.
+
+ assert(&getLabelMap() == &FunctionLabelMap && "Didn't pop block right?");
+
+ // Check goto/label use.
+ for (llvm::DenseMap<IdentifierInfo*, LabelStmt*>::iterator
+ I = FunctionLabelMap.begin(), E = FunctionLabelMap.end(); I != E; ++I) {
+ LabelStmt *L = I->second;
+
+ // Verify that we have no forward references left. If so, there was a goto
+ // or address of a label taken, but no definition of it. Label fwd
+ // definitions are indicated with a null substmt.
+ if (L->getSubStmt() != 0)
+ continue;
+
+ // Emit error.
+ Diag(L->getIdentLoc(), diag::err_undeclared_label_use) << L->getName();
+
+ // At this point, we have gotos that use the bogus label. Stitch the label
+ // into the function body so that it isn't leaked and the AST stays well
+ // formed.
+ if (Body == 0) {
+ // The whole function wasn't parsed correctly, just delete this.
+ L->Destroy(Context);
+ continue;
+ }
+
+ // Otherwise, the body is valid: we want to stitch the label decl into the
+ // function somewhere so that it is properly owned and so that the goto
+ // has a valid target. Do this by creating a new compound stmt with the
+ // label in it.
+
+ // Give the label a sub-statement.
+ L->setSubStmt(new (Context) NullStmt(L->getIdentLoc()));
+
+ CompoundStmt *Compound = isa<CXXTryStmt>(Body) ?
+ cast<CXXTryStmt>(Body)->getTryBlock() :
+ cast<CompoundStmt>(Body);
+ std::vector<Stmt*> Elements(Compound->body_begin(), Compound->body_end());
+ Elements.push_back(L);
+ Compound->setStmts(Context, &Elements[0], Elements.size());
+ }
+ FunctionLabelMap.clear();
+
+ if (!Body) return D;
+
+ // Verify that gotos and switch cases don't jump into scopes illegally.
+ if (CurFunctionNeedsScopeChecking)
+ DiagnoseInvalidJumps(Body);
+
+ // C++ constructors that have function-try-blocks can't have return statements
+ // in the handlers of that block. (C++ [except.handle]p14) Verify this.
+ if (isa<CXXConstructorDecl>(dcl) && isa<CXXTryStmt>(Body))
+ DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+
+ return D;
+}
+
+/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
+/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
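+/// For example (illustrative): calling "bar(1, 2)" with no visible
+/// declaration of 'bar' produces an implicit declaration "int bar();".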
+NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
+ IdentifierInfo &II, Scope *S) {
+ // Before we produce a declaration for an implicitly defined
+ // function, see whether there was a locally-scoped declaration of
+ // this name as a function or variable. If so, use that
+ // (non-visible) declaration, and complain about it.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = LocallyScopedExternalDecls.find(&II);
+ if (Pos != LocallyScopedExternalDecls.end()) {
+ Diag(Loc, diag::warn_use_out_of_scope_declaration) << Pos->second;
+ Diag(Pos->second->getLocation(), diag::note_previous_declaration);
+ return Pos->second;
+ }
+
+ // Extension in C99. Legal in C90, but warn about it.
+ if (getLangOptions().C99)
+ Diag(Loc, diag::ext_implicit_function_decl) << &II;
+ else
+ Diag(Loc, diag::warn_implicit_function_decl) << &II;
+
+ // FIXME: handle stuff like:
+ // void foo() { extern float X(); }
+ // void bar() { X(); } <-- implicit decl for X in another scope.
+
+ // Set a Declarator for the implicit definition: int foo();
+ const char *Dummy;
+ DeclSpec DS;
+ bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy);
+ Error = Error; // Silence warning.
+ assert(!Error && "Error setting up implicit decl!");
+ Declarator D(DS, Declarator::BlockContext);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
+ 0, 0, false, SourceLocation(),
+ false, 0,0,0, Loc, D),
+ SourceLocation());
+ D.SetIdentifier(&II, Loc);
+
+ // Insert this function into translation-unit scope.
+
+ DeclContext *PrevDC = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+
+ FunctionDecl *FD =
+ dyn_cast<FunctionDecl>(ActOnDeclarator(TUScope, D, DeclPtrTy()).getAs<Decl>());
+ FD->setImplicit();
+
+ CurContext = PrevDC;
+
+ AddKnownFunctionAttributes(FD);
+
+ return FD;
+}
+
+/// \brief Adds any function attributes that we know a priori based on
+/// the declaration of this function.
+///
+/// These attributes can apply both to implicitly-declared builtins
+/// (like __builtin___printf_chk) or to library-declared functions
+/// like NSLog or printf.
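+/// For example (illustrative): a declaration of NSLog that carries no format
+/// attribute is given format(printf, 1, 2) so its calls can be format-checked.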
+void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return;
+
+ // If this is a built-in function, map its builtin attributes to
+ // actual attributes.
+ if (unsigned BuiltinID = FD->getBuiltinID(Context)) {
+ // Handle printf-formatting attributes.
+ unsigned FormatIdx;
+ bool HasVAListArg;
+ if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
+ if (!FD->getAttr<FormatAttr>())
+ FD->addAttr(::new (Context) FormatAttr("printf", FormatIdx + 1,
+ FormatIdx + 2));
+ }
+
+ // Mark const if we don't care about errno and that is the only
+ // thing preventing the function from being const. This allows
+ // IRgen to use LLVM intrinsics for such functions.
+ if (!getLangOptions().MathErrno &&
+ Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
+ if (!FD->getAttr<ConstAttr>())
+ FD->addAttr(::new (Context) ConstAttr());
+ }
+ }
+
+ IdentifierInfo *Name = FD->getIdentifier();
+ if (!Name)
+ return;
+ if ((!getLangOptions().CPlusPlus &&
+ FD->getDeclContext()->isTranslationUnit()) ||
+ (isa<LinkageSpecDecl>(FD->getDeclContext()) &&
+ cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
+ LinkageSpecDecl::lang_c)) {
+ // Okay: this could be a libc/libm/Objective-C function we know
+ // about.
+ } else
+ return;
+
+ if (Name->isStr("NSLog") || Name->isStr("NSLogv")) {
+ if (const FormatAttr *Format = FD->getAttr<FormatAttr>()) {
+ // FIXME: We know better than our headers.
+ const_cast<FormatAttr *>(Format)->setType("printf");
+ } else
+ FD->addAttr(::new (Context) FormatAttr("printf", 1, 2));
+ } else if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
+ if (!FD->getAttr<FormatAttr>())
+ FD->addAttr(::new (Context) FormatAttr("printf", 2, 3));
+ }
+}
+
+TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T) {
+ assert(D.getIdentifier() && "Wrong callback for declspec without declarator");
+ assert(!T.isNull() && "GetTypeForDeclarator() returned null type");
+
+ // Scope manipulation handled by caller.
+ TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
+ D.getIdentifierLoc(),
+ D.getIdentifier(),
+ T);
+
+ if (TagType *TT = dyn_cast<TagType>(T)) {
+ TagDecl *TD = TT->getDecl();
+
+ // If the TagDecl that the TypedefDecl points to is an anonymous decl
+ // keep track of the TypedefDecl.
+ if (!TD->getIdentifier() && !TD->getTypedefForAnonDecl())
+ TD->setTypedefForAnonDecl(NewTD);
+ }
+
+ if (D.isInvalidType())
+ NewTD->setInvalidDecl();
+ return NewTD;
+}
+
+
+/// \brief Determine whether a tag with a given kind is acceptable
+/// as a redeclaration of the given tag declaration.
+///
+/// \returns true if the new tag kind is acceptable, false otherwise.
+bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
+ TagDecl::TagKind NewTag,
+ SourceLocation NewTagLoc,
+ const IdentifierInfo &Name) {
+ // C++ [dcl.type.elab]p3:
+ // The class-key or enum keyword present in the
+ // elaborated-type-specifier shall agree in kind with the
+ // declaration to which the name in the elaborated-type-specifier
+ // refers. This rule also applies to the form of
+ // elaborated-type-specifier that declares a class-name or
+ // friend class since it can be construed as referring to the
+ // definition of the class. Thus, in any
+ // elaborated-type-specifier, the enum keyword shall be used to
+ // refer to an enumeration (7.2), the union class-key shall be
+ // used to refer to a union (clause 9), and either the class or
+ // struct class-key shall be used to refer to a class (clause 9)
+ // declared using the class or struct class-key.
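+ // For example (illustrative): "class S; struct S { };" only warns about
+ // the struct/class mismatch, while "enum E { }; struct E;" is an error.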
+ TagDecl::TagKind OldTag = Previous->getTagKind();
+ if (OldTag == NewTag)
+ return true;
+
+ if ((OldTag == TagDecl::TK_struct || OldTag == TagDecl::TK_class) &&
+ (NewTag == TagDecl::TK_struct || NewTag == TagDecl::TK_class)) {
+ // Warn about the struct/class tag mismatch.
+ bool isTemplate = false;
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
+ isTemplate = Record->getDescribedClassTemplate();
+
+ Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
+ << (NewTag == TagDecl::TK_class)
+ << isTemplate << &Name
+ << CodeModificationHint::CreateReplacement(SourceRange(NewTagLoc),
+ OldTag == TagDecl::TK_class? "class" : "struct");
+ Diag(Previous->getLocation(), diag::note_previous_use);
+ return true;
+ }
+ return false;
+}
+
+/// ActOnTag - This is invoked when we see 'struct foo' or 'struct {'. In the
+/// former case, Name will be non-null. In the latter case, Name will be null.
+/// TagSpec indicates what kind of tag this is. TK indicates whether this is a
+/// reference/declaration/definition of a tag.
+Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc, const CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ bool &OwnedDecl) {
+ // If this is not a definition, it must have a name.
+ assert((Name != 0 || TK == TK_Definition) &&
+ "Nameless record must be a definition!");
+
+ OwnedDecl = false;
+ TagDecl::TagKind Kind;
+ switch (TagSpec) {
+ default: assert(0 && "Unknown tag type!");
+ case DeclSpec::TST_struct: Kind = TagDecl::TK_struct; break;
+ case DeclSpec::TST_union: Kind = TagDecl::TK_union; break;
+ case DeclSpec::TST_class: Kind = TagDecl::TK_class; break;
+ case DeclSpec::TST_enum: Kind = TagDecl::TK_enum; break;
+ }
+
+ DeclContext *SearchDC = CurContext;
+ DeclContext *DC = CurContext;
+ NamedDecl *PrevDecl = 0;
+
+ bool Invalid = false;
+
+ if (Name && SS.isNotEmpty()) {
+ // We have a nested-name tag ('struct foo::bar').
+
+ // Check for invalid 'foo::'.
+ if (SS.isInvalid()) {
+ Name = 0;
+ goto CreateNewDecl;
+ }
+
+ if (RequireCompleteDeclContext(SS))
+ return DeclPtrTy::make((Decl *)0);
+
+ DC = computeDeclContext(SS);
+ SearchDC = DC;
+ // Look-up name inside 'foo::'.
+ PrevDecl
+ = dyn_cast_or_null<TagDecl>(
+ LookupQualifiedName(DC, Name, LookupTagName, true).getAsDecl());
+
+ // A tag 'foo::bar' must already exist.
+ if (PrevDecl == 0) {
+ Diag(NameLoc, diag::err_not_tag_in_scope) << Name << SS.getRange();
+ Name = 0;
+ Invalid = true;
+ goto CreateNewDecl;
+ }
+ } else if (Name) {
+ // If this is a named struct, check to see if there was a previous forward
+ // declaration or definition.
+ // FIXME: We're looking into outer scopes here, even when we
+ // shouldn't be. Doing so can result in ambiguities that we
+ // shouldn't be diagnosing.
+ LookupResult R = LookupName(S, Name, LookupTagName,
+ /*RedeclarationOnly=*/(TK != TK_Reference));
+ if (R.isAmbiguous()) {
+ DiagnoseAmbiguousLookup(R, Name, NameLoc);
+ // FIXME: This is not the best way to recover from a case like:
+ //
+ // struct S s;
+ //
+ // which causes a needless "incomplete type" error later.
+ Name = 0;
+ PrevDecl = 0;
+ Invalid = true;
+ }
+ else
+ PrevDecl = R;
+
+ if (!getLangOptions().CPlusPlus && TK != TK_Reference) {
+ // FIXME: This makes sure that we ignore the contexts associated
+ // with C structs, unions, and enums when looking for a matching
+ // tag declaration or definition. See the similar lookup tweak
+ // in Sema::LookupName; is there a better way to deal with this?
+ while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
+ }
+ }
+
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl) {
+ // Check whether the previous declaration is usable.
+ (void)DiagnoseUseOfDecl(PrevDecl, NameLoc);
+
+ if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) {
+ // If this is a use of a previous tag, or if the tag is already declared
+ // in the same scope (so that the definition/declaration completes or
+ // re-mentions the tag), reuse the decl.
+ if (TK == TK_Reference || isDeclInScope(PrevDecl, SearchDC, S)) {
+ // Make sure that this wasn't declared as an enum and now used as a
+ // struct or something similar.
+ if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind, KWLoc, *Name)) {
+ bool SafeToContinue
+ = (PrevTagDecl->getTagKind() != TagDecl::TK_enum &&
+ Kind != TagDecl::TK_enum);
+ if (SafeToContinue)
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << CodeModificationHint::CreateReplacement(SourceRange(KWLoc),
+ PrevTagDecl->getKindName());
+ else
+ Diag(KWLoc, diag::err_use_with_wrong_tag) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_use);
+
+ if (SafeToContinue)
+ Kind = PrevTagDecl->getTagKind();
+ else {
+ // Recover by making this an anonymous redefinition.
+ Name = 0;
+ PrevDecl = 0;
+ Invalid = true;
+ }
+ }
+
+ if (!Invalid) {
+ // If this is a use, just return the declaration we found.
+
+ // FIXME: In the future, return a variant or some other clue
+ // for the consumer of this Decl to know it doesn't own it.
+ // For our current ASTs this shouldn't be a problem, but will
+ // need to be changed with DeclGroups.
+ if (TK == TK_Reference)
+ return DeclPtrTy::make(PrevDecl);
+
+ // Diagnose attempts to redefine a tag.
+ if (TK == TK_Definition) {
+ if (TagDecl *Def = PrevTagDecl->getDefinition(Context)) {
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // If this is a redefinition, recover by making this
+ // struct be anonymous, which will make any later
+ // references get the previous definition.
+ Name = 0;
+ PrevDecl = 0;
+ Invalid = true;
+ } else {
+ // If the type is currently being defined, complain
+ // about a nested redefinition.
+ TagType *Tag = cast<TagType>(Context.getTagDeclType(PrevTagDecl));
+ if (Tag->isBeingDefined()) {
+ Diag(NameLoc, diag::err_nested_redefinition) << Name;
+ Diag(PrevTagDecl->getLocation(),
+ diag::note_previous_definition);
+ Name = 0;
+ PrevDecl = 0;
+ Invalid = true;
+ }
+ }
+
+ // Okay, this is definition of a previously declared or referenced
+ // tag PrevDecl. We're going to create a new Decl for it.
+ }
+ }
+ // If we get here we have (another) forward declaration or we
+ // have a definition. Just create a new decl.
+ } else {
+ // If we get here, this is a definition of a new tag type in a nested
+ // scope, e.g. "struct foo; void bar() { struct foo; }", just create a
+ // new decl/type. We set PrevDecl to NULL so that the entities
+ // have distinct types.
+ PrevDecl = 0;
+ }
+ // If we get here, we're going to create a new Decl. If PrevDecl
+ // is non-NULL, it's a definition of the tag declared by
+ // PrevDecl. If it's NULL, we have a new definition.
+ } else {
+ // PrevDecl is a namespace, template, or anything else
+ // that lives in the IDNS_Tag identifier namespace.
+ if (isDeclInScope(PrevDecl, SearchDC, S)) {
+ // The tag name clashes with a namespace name, issue an error and
+ // recover by making this tag be anonymous.
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ Name = 0;
+ PrevDecl = 0;
+ Invalid = true;
+ } else {
+ // The existing declaration isn't relevant to us; we're in a
+ // new scope, so clear out the previous declaration.
+ PrevDecl = 0;
+ }
+ }
+ } else if (TK == TK_Reference && SS.isEmpty() && Name &&
+ (Kind != TagDecl::TK_enum || !getLangOptions().CPlusPlus)) {
+ // C++ [basic.scope.pdecl]p5:
+ // -- for an elaborated-type-specifier of the form
+ //
+ // class-key identifier
+ //
+ // if the elaborated-type-specifier is used in the
+ // decl-specifier-seq or parameter-declaration-clause of a
+ // function defined in namespace scope, the identifier is
+ // declared as a class-name in the namespace that contains
+ // the declaration; otherwise, except as a friend
+ // declaration, the identifier is declared in the smallest
+ // non-class, non-function-prototype scope that contains the
+ // declaration.
+ //
+ // C99 6.7.2.3p8 has a similar (but not identical!) provision for
+ // C structs and unions.
+ //
+ // GNU C also supports this behavior as part of its incomplete
+ // enum types extension, while GNU C++ does not.
+ //
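+ // For example (illustrative): in C++, "void f(class S *p);" at namespace
+ // scope declares S in the enclosing namespace rather than in the function
+ // prototype scope.
+ //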
+ // Find the context where we'll be declaring the tag.
+ // FIXME: We would like to maintain the current DeclContext as the
+ // lexical context,
+ while (SearchDC->isRecord())
+ SearchDC = SearchDC->getParent();
+
+ // Find the scope where we'll be declaring the tag.
+ while (S->isClassScope() ||
+ (getLangOptions().CPlusPlus && S->isFunctionPrototypeScope()) ||
+ ((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext()))
+ S = S->getParent();
+ }
+
+CreateNewDecl:
+
+ // If there is an identifier, use the location of the identifier as the
+ // location of the decl, otherwise use the location of the struct/union
+ // keyword.
+ SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
+
+ // Otherwise, create a new declaration. If there is a previous
+ // declaration of the same entity, the two will be linked via
+ // PrevDecl.
+ TagDecl *New;
+
+ if (Kind == TagDecl::TK_enum) {
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // enum X { A, B, C } D; D should chain to X.
+ New = EnumDecl::Create(Context, SearchDC, Loc, Name,
+ cast_or_null<EnumDecl>(PrevDecl));
+ // If this is an undefined enum, warn.
+ if (TK != TK_Definition && !Invalid) {
+ unsigned DK = getLangOptions().CPlusPlus? diag::err_forward_ref_enum
+ : diag::ext_forward_ref_enum;
+ Diag(Loc, DK);
+ }
+ } else {
+ // struct/union/class
+
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // struct X { int A; } D; D should chain to X.
+ if (getLangOptions().CPlusPlus)
+ // FIXME: Look for a way to use RecordDecl for simple structs.
+ New = CXXRecordDecl::Create(Context, Kind, SearchDC, Loc, Name,
+ cast_or_null<CXXRecordDecl>(PrevDecl));
+ else
+ New = RecordDecl::Create(Context, Kind, SearchDC, Loc, Name,
+ cast_or_null<RecordDecl>(PrevDecl));
+ }
+
+ if (Kind != TagDecl::TK_enum) {
+ // Handle #pragma pack: if the #pragma pack stack has non-default
+ // alignment, make up a packed attribute for this decl. These
+ // attributes are checked when the ASTContext lays out the
+ // structure.
+ //
+ // It is important for implementing the correct semantics that this
+ // happen here (in ActOnTag). The #pragma pack stack is
+ // maintained as a result of parser callbacks which can occur at
+ // many points during the parsing of a struct declaration (because
+ // the #pragma tokens are effectively skipped over during the
+ // parsing of the struct).
+ if (unsigned Alignment = getPragmaPackAlignment())
+ New->addAttr(::new (Context) PackedAttr(Alignment * 8));
+ }
+
+ if (getLangOptions().CPlusPlus && SS.isEmpty() && Name && !Invalid) {
+ // C++ [dcl.typedef]p3:
+ // [...] Similarly, in a given scope, a class or enumeration
+ // shall not be declared with the same name as a typedef-name
+ // that is declared in that scope and refers to a type other
+ // than the class or enumeration itself.
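+ // For example (illustrative): "typedef int X; struct X { };" in the same
+ // scope is ill-formed, while "struct Y { }; typedef struct Y Y;" is fine.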
+ LookupResult Lookup = LookupName(S, Name, LookupOrdinaryName, true);
+ TypedefDecl *PrevTypedef = 0;
+ if (Lookup.getKind() == LookupResult::Found)
+ PrevTypedef = dyn_cast<TypedefDecl>(Lookup.getAsDecl());
+
+ if (PrevTypedef && isDeclInScope(PrevTypedef, SearchDC, S) &&
+ Context.getCanonicalType(Context.getTypeDeclType(PrevTypedef)) !=
+ Context.getCanonicalType(Context.getTypeDeclType(New))) {
+ Diag(Loc, diag::err_tag_definition_of_typedef)
+ << Context.getTypeDeclType(New)
+ << PrevTypedef->getUnderlyingType();
+ Diag(PrevTypedef->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ }
+ }
+
+ if (Invalid)
+ New->setInvalidDecl();
+
+ if (Attr)
+ ProcessDeclAttributeList(New, Attr);
+
+ // If we're declaring or defining a tag in function prototype scope
+ // in C, note that this type can only be used within the function.
+ if (Name && S->isFunctionPrototypeScope() && !getLangOptions().CPlusPlus)
+ Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+
+ // Set the lexical context. If the tag has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ New->setLexicalDeclContext(CurContext);
+
+ // Set the access specifier.
+ if (!Invalid)
+ SetMemberAccessSpecifier(New, PrevDecl, AS);
+
+ if (TK == TK_Definition)
+ New->startDefinition();
+
+ // If this has an identifier, add it to the scope stack.
+ if (Name) {
+ S = getNonFieldDeclScope(S);
+ PushOnScopeChains(New, S);
+ } else {
+ CurContext->addDecl(Context, New);
+ }
+
+ OwnedDecl = true;
+ return DeclPtrTy::make(New);
+}
+
+void Sema::ActOnTagStartDefinition(Scope *S, DeclPtrTy TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD.getAs<Decl>());
+
+ // Enter the tag context.
+ PushDeclContext(S, Tag);
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Tag)) {
+ FieldCollector->StartClass();
+
+ if (Record->getIdentifier()) {
+ // C++ [class]p2:
+ // [...] The class-name is also inserted into the scope of the
+ // class itself; this is known as the injected-class-name. For
+ // purposes of access checking, the injected-class-name is treated
+ // as if it were a public member name.
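+ // For example (illustrative): inside "struct S { S *next; };", the name S
+ // resolves to the injected-class-name of S itself.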
+ CXXRecordDecl *InjectedClassName
+ = CXXRecordDecl::Create(Context, Record->getTagKind(),
+ CurContext, Record->getLocation(),
+ Record->getIdentifier(), Record);
+ InjectedClassName->setImplicit();
+ InjectedClassName->setAccess(AS_public);
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
+ InjectedClassName->setDescribedClassTemplate(Template);
+ PushOnScopeChains(InjectedClassName, S);
+ assert(InjectedClassName->isInjectedClassName() &&
+ "Broken injected-class-name");
+ }
+ }
+}
+
+void Sema::ActOnTagFinishDefinition(Scope *S, DeclPtrTy TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD.getAs<Decl>());
+
+ if (isa<CXXRecordDecl>(Tag))
+ FieldCollector->FinishClass();
+
+ // Exit this scope of this tag's definition.
+ PopDeclContext();
+
+ // Notify the consumer that we've defined a tag.
+ Consumer.HandleTagDeclDefinition(Tag);
+}
+
+// Note that FieldName may be null for anonymous bitfields.
+bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
+ QualType FieldTy, const Expr *BitWidth) {
+
+ // C99 6.7.2.1p4 - verify the field type.
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
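+ // For example (illustrative): "struct S { int i : 3; };" is fine, while
+ // "struct S { float f : 3; };" is diagnosed below.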
+ if (!FieldTy->isDependentType() && !FieldTy->isIntegralType()) {
+ // Handle incomplete types with specific error.
+ if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
+ return true;
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
+ << FieldName << FieldTy << BitWidth->getSourceRange();
+ return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
+ << FieldTy << BitWidth->getSourceRange();
+ }
+
+ // If the bit-width is type- or value-dependent, don't try to check
+ // it now.
+ if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
+ return false;
+
+ llvm::APSInt Value;
+ if (VerifyIntegerConstantExpression(BitWidth, &Value))
+ return true;
+
+ // Zero-width bitfield is ok for anonymous field.
+ if (Value == 0 && FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
+
+ if (Value.isSigned() && Value.isNegative()) {
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
+ << FieldName << Value.toString(10);
+ return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
+ << Value.toString(10);
+ }
+
+ if (!FieldTy->isDependentType()) {
+ uint64_t TypeSize = Context.getTypeSize(FieldTy);
+ if (Value.getZExtValue() > TypeSize) {
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size)
+ << FieldName << (unsigned)TypeSize;
+ return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_size)
+ << (unsigned)TypeSize;
+ }
+ }
+
+ return false;
+}
+
+/// ActOnField - Each field of a struct/union/class is passed into this in order
+/// to create a FieldDecl object for it.
+Sema::DeclPtrTy Sema::ActOnField(Scope *S, DeclPtrTy TagD,
+ SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth) {
+ FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD.getAs<Decl>()),
+ DeclStart, D, static_cast<Expr*>(BitfieldWidth),
+ AS_public);
+ return DeclPtrTy::make(Res);
+}
+
+/// HandleField - Analyze a field of a C struct or a C++ data member.
+///
+FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitWidth,
+ AccessSpecifier AS) {
+ IdentifierInfo *II = D.getIdentifier();
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ QualType T = GetTypeForDeclarator(D, S);
+ if (getLangOptions().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+
+ NamedDecl *PrevDecl = LookupName(S, II, LookupMemberName, true);
+ if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
+ PrevDecl = 0;
+
+ FieldDecl *NewFD
+ = CheckFieldDecl(II, T, Record, Loc,
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable,
+ BitWidth, AS, PrevDecl, &D);
+ if (NewFD->isInvalidDecl() && PrevDecl) {
+ // Don't introduce NewFD into scope; there's already something
+ // with the same name in the same scope.
+ } else if (II) {
+ PushOnScopeChains(NewFD, S);
+ } else
+ Record->addDecl(Context, NewFD);
+
+ return NewFD;
+}
+
+/// \brief Build a new FieldDecl and check its well-formedness.
+///
+/// This routine builds a new FieldDecl given the fields name, type,
+/// record, etc. \p PrevDecl should refer to any previous declaration
+/// with the same name and in the same scope as the field to be
+/// created.
+///
+/// \returns a new FieldDecl.
+///
+/// \todo The Declarator argument is a hack. It will be removed once
+FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
+ RecordDecl *Record, SourceLocation Loc,
+ bool Mutable, Expr *BitWidth,
+ AccessSpecifier AS, NamedDecl *PrevDecl,
+ Declarator *D) {
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ bool InvalidDecl = false;
+ if (D) InvalidDecl = D->isInvalidType();
+
+ // If we receive a broken type, recover by assuming 'int' and
+ // marking this declaration as invalid.
+ if (T.isNull()) {
+ InvalidDecl = true;
+ T = Context.IntTy;
+ }
+
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ if (T->isVariablyModifiedType()) {
+ bool SizeIsNegative;
+ QualType FixedTy = TryToFixInvalidVariablyModifiedType(T, Context,
+ SizeIsNegative);
+ if (!FixedTy.isNull()) {
+ Diag(Loc, diag::warn_illegal_constant_array_size);
+ T = FixedTy;
+ } else {
+ if (SizeIsNegative)
+ Diag(Loc, diag::err_typecheck_negative_array_size);
+ else
+ Diag(Loc, diag::err_typecheck_field_variable_size);
+ T = Context.IntTy;
+ InvalidDecl = true;
+ }
+ }
+
+ // Fields cannot have abstract class types.
+ if (RequireNonAbstractType(Loc, T, diag::err_abstract_type_in_decl,
+ AbstractFieldType))
+ InvalidDecl = true;
+
+ // If this is declared as a bit-field, check the bit-field.
+ if (BitWidth && VerifyBitField(Loc, II, T, BitWidth)) {
+ InvalidDecl = true;
+ DeleteExpr(BitWidth);
+ BitWidth = 0;
+ }
+
+ FieldDecl *NewFD = FieldDecl::Create(Context, Record, Loc, II, T, BitWidth,
+ Mutable);
+ if (InvalidDecl)
+ NewFD->setInvalidDecl();
+
+ if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ }
+
+ if (getLangOptions().CPlusPlus && !T->isPODType())
+ cast<CXXRecordDecl>(Record)->setPOD(false);
+
+ // FIXME: We need to pass in the attributes given an AST
+ // representation, not a parser representation.
+ if (D)
+ ProcessDeclAttributes(NewFD, *D);
+
+ if (T.isObjCGCWeak())
+ Diag(Loc, diag::warn_attribute_weak_on_field);
+
+ NewFD->setAccess(AS);
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...] no
+ // private or protected non-static data members (clause 11).
+ // A POD must be an aggregate.
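+ // For example (illustrative): "class C { int x; };" has a private member,
+ // so C is neither an aggregate nor a POD.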
+ if (getLangOptions().CPlusPlus &&
+ (AS == AS_private || AS == AS_protected)) {
+ CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
+ CXXRecord->setAggregate(false);
+ CXXRecord->setPOD(false);
+ }
+
+ return NewFD;
+}
+
+/// TranslateIvarVisibility - Translate visibility from a token ID to an
+/// AST enum value.
+static ObjCIvarDecl::AccessControl
+TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
+ switch (ivarVisibility) {
+ default: assert(0 && "Unknown visitibility kind");
+ case tok::objc_private: return ObjCIvarDecl::Private;
+ case tok::objc_public: return ObjCIvarDecl::Public;
+ case tok::objc_protected: return ObjCIvarDecl::Protected;
+ case tok::objc_package: return ObjCIvarDecl::Package;
+ }
+}
+
+/// ActOnIvar - Each ivar field of an Objective-C class is passed into this
+/// in order to create an IvarDecl object for it.
+Sema::DeclPtrTy Sema::ActOnIvar(Scope *S,
+ SourceLocation DeclStart,
+ Declarator &D, ExprTy *BitfieldWidth,
+ tok::ObjCKeywordKind Visibility) {
+
+ IdentifierInfo *II = D.getIdentifier();
+ Expr *BitWidth = (Expr*)BitfieldWidth;
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ // FIXME: Unnamed fields can be handled in various different ways, for
+ // example, unnamed unions inject all members into the struct namespace!
+
+ QualType T = GetTypeForDeclarator(D, S);
+
+ if (BitWidth) {
+ // 6.7.2.1p3, 6.7.2.1p4
+ if (VerifyBitField(Loc, II, T, BitWidth)) {
+ D.setInvalidType();
+ DeleteExpr(BitWidth);
+ BitWidth = 0;
+ }
+ } else {
+ // Not a bitfield.
+
+ // validate II.
+
+ }
+
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ if (T->isVariablyModifiedType()) {
+ Diag(Loc, diag::err_typecheck_ivar_variable_size);
+ D.setInvalidType();
+ }
+
+ // Get the visibility (access control) for this ivar.
+ ObjCIvarDecl::AccessControl ac =
+ Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
+ : ObjCIvarDecl::None;
+
+ // Construct the decl.
+ ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, CurContext, Loc, II, T,ac,
+ (Expr *)BitfieldWidth);
+
+ if (II) {
+ NamedDecl *PrevDecl = LookupName(S, II, LookupMemberName, true);
+ if (PrevDecl && isDeclInScope(PrevDecl, CurContext, S)
+ && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewID->setInvalidDecl();
+ }
+ }
+
+ // Process attributes attached to the ivar.
+ ProcessDeclAttributes(NewID, D);
+
+ if (D.isInvalidType())
+ NewID->setInvalidDecl();
+
+ if (II) {
+ // FIXME: When interfaces are DeclContexts, we'll need to add
+ // these to the interface.
+ S->AddDecl(DeclPtrTy::make(NewID));
+ IdResolver.AddDecl(NewID);
+ }
+
+ return DeclPtrTy::make(NewID);
+}
+
+void Sema::ActOnFields(Scope* S,
+ SourceLocation RecLoc, DeclPtrTy RecDecl,
+ DeclPtrTy *Fields, unsigned NumFields,
+ SourceLocation LBrac, SourceLocation RBrac,
+ AttributeList *Attr) {
+ Decl *EnclosingDecl = RecDecl.getAs<Decl>();
+ assert(EnclosingDecl && "missing record or interface decl");
+
+ // If the decl this is being inserted into is invalid, then it may be a
+ // redeclaration or some other bogus case. Don't try to add fields to it.
+ if (EnclosingDecl->isInvalidDecl()) {
+ // FIXME: Deallocate fields?
+ return;
+ }
+
+
+ // Verify that all the fields are okay.
+ unsigned NumNamedMembers = 0;
+ llvm::SmallVector<FieldDecl*, 32> RecFields;
+
+ RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+ for (unsigned i = 0; i != NumFields; ++i) {
+ FieldDecl *FD = cast<FieldDecl>(Fields[i].getAs<Decl>());
+
+ // Get the type for the field.
+ Type *FDTy = FD->getType().getTypePtr();
+
+ if (!FD->isAnonymousStructOrUnion()) {
+ // Remember all fields written by the user.
+ RecFields.push_back(FD);
+ }
+
+ // If the field is already invalid for some reason, don't emit more
+ // diagnostics about it.
+ if (FD->isInvalidDecl())
+ continue;
+
+ // C99 6.7.2.1p2:
+ // A structure or union shall not contain a member with
+ // incomplete or function type (hence, a structure shall not
+ // contain an instance of itself, but may contain a pointer to
+ // an instance of itself), except that the last member of a
+ // structure with more than one named member may have incomplete
+ // array type; such a structure (and any union containing,
+ // possibly recursively, a member that is such a structure)
+ // shall not be a member of a structure or an element of an
+ // array.
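+ // For example (illustrative): "struct S { int n; int data[]; };" has a
+ // valid flexible array member, while "struct T { int data[]; };" does not.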
+ if (FDTy->isFunctionType()) {
+ // Field declared as a function.
+ Diag(FD->getLocation(), diag::err_field_declared_as_function)
+ << FD->getDeclName();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (FDTy->isIncompleteArrayType() && i == NumFields - 1 &&
+ Record && Record->isStruct()) {
+ // Flexible array member.
+ if (NumNamedMembers < 1) {
+ Diag(FD->getLocation(), diag::err_flexible_array_empty_struct)
+ << FD->getDeclName();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+ // Okay, we have a legal flexible array member at the end of the struct.
+ if (Record)
+ Record->setHasFlexibleArrayMember(true);
+ } else if (!FDTy->isDependentType() &&
+ RequireCompleteType(FD->getLocation(), FD->getType(),
+ diag::err_field_incomplete)) {
+ // Incomplete type
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (const RecordType *FDTTy = FDTy->getAsRecordType()) {
+ if (FDTTy->getDecl()->hasFlexibleArrayMember()) {
+ // If this is a member of a union, then the entire union becomes "flexible".
+ if (Record && Record->isUnion()) {
+ Record->setHasFlexibleArrayMember(true);
+ } else {
+ // If this is a struct/class and this is not the last element, reject
+ // it. Note that GCC supports variable sized arrays in the middle of
+ // structures.
+ if (i != NumFields-1)
+ Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct)
+ << FD->getDeclName() << FD->getType();
+ else {
+ // We support flexible arrays at the end of structs in
+ // other structs as an extension.
+ Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
+ << FD->getDeclName();
+ if (Record)
+ Record->setHasFlexibleArrayMember(true);
+ }
+ }
+ }
+ } else if (FDTy->isObjCInterfaceType()) {
+ // A field cannot be an Objective-C object.
+ Diag(FD->getLocation(), diag::err_statically_allocated_object);
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+ // Keep track of the number of named members.
+ if (FD->getIdentifier())
+ ++NumNamedMembers;
+ }
+
+ // Okay, we successfully defined 'Record'.
+ if (Record) {
+ Record->completeDefinition(Context);
+ } else {
+ ObjCIvarDecl **ClsFields =
+ reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
+ ID->setIVarList(ClsFields, RecFields.size(), Context);
+ ID->setLocEnd(RBrac);
+
+ // Must enforce the rule that ivars in the base classes may not be
+ // duplicates.
+ if (ID->getSuperClass()) {
+ for (ObjCInterfaceDecl::ivar_iterator IVI = ID->ivar_begin(),
+ IVE = ID->ivar_end(); IVI != IVE; ++IVI) {
+ ObjCIvarDecl* Ivar = (*IVI);
+
+ if (IdentifierInfo *II = Ivar->getIdentifier()) {
+ ObjCIvarDecl* prevIvar =
+ ID->getSuperClass()->lookupInstanceVariable(Context, II);
+ if (prevIvar) {
+ Diag(Ivar->getLocation(), diag::err_duplicate_member) << II;
+ Diag(prevIvar->getLocation(), diag::note_previous_declaration);
+ }
+ }
+ }
+ }
+ } else if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
+ for (unsigned I = 0, N = RecFields.size(); I != N; ++I) {
+ // FIXME: Set the DeclContext correctly when we build the
+ // declarations.
+ ClsFields[I]->setLexicalDeclContext(IMPDecl);
+ IMPDecl->addDecl(Context, ClsFields[I]);
+ }
+ CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ }
+ }
+
+ if (Attr)
+ ProcessDeclAttributeList(Record, Attr);
+}
+
+EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
+ EnumConstantDecl *LastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ ExprArg val) {
+ Expr *Val = (Expr *)val.get();
+
+ llvm::APSInt EnumVal(32);
+ QualType EltTy;
+ if (Val && !Val->isTypeDependent()) {
+ // Make sure to promote the operand type to int.
+ UsualUnaryConversions(Val);
+ if (Val != val.get()) {
+ val.release();
+ val = Val;
+ }
+
+ // C99 6.7.2.2p2: Make sure we have an integer constant expression.
+ SourceLocation ExpLoc;
+ if (!Val->isValueDependent() &&
+ VerifyIntegerConstantExpression(Val, &EnumVal)) {
+ Val = 0;
+ } else {
+ EltTy = Val->getType();
+ }
+ }
+
+ if (!Val) {
+ if (LastEnumConst) {
+ // Assign the last value + 1.
+ EnumVal = LastEnumConst->getInitVal();
+ ++EnumVal;
+
+ // Check for overflow on increment.
+ if (EnumVal < LastEnumConst->getInitVal())
+ Diag(IdLoc, diag::warn_enum_value_overflow);
+
+ EltTy = LastEnumConst->getType();
+ } else {
+ // First value, set to zero.
+ EltTy = Context.IntTy;
+ EnumVal.zextOrTrunc(static_cast<uint32_t>(Context.getTypeSize(EltTy)));
+ }
+ }
+
+ val.release();
+ return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy,
+ Val, EnumVal);
+}
+
+
+Sema::DeclPtrTy Sema::ActOnEnumConstant(Scope *S, DeclPtrTy theEnumDecl,
+ DeclPtrTy lastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ SourceLocation EqualLoc, ExprTy *val) {
+ EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl.getAs<Decl>());
+ EnumConstantDecl *LastEnumConst =
+ cast_or_null<EnumConstantDecl>(lastEnumConst.getAs<Decl>());
+ Expr *Val = static_cast<Expr*>(val);
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ S = getNonFieldDeclScope(S);
+
+ // Verify that there isn't already something declared with this name in this
+ // scope.
+ NamedDecl *PrevDecl = LookupName(S, Id, LookupOrdinaryName);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl) {
+ // When in C++, we may get a TagDecl with the same name; in this case the
+ // enum constant will 'hide' the tag.
+ assert((getLangOptions().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
+ "Received TagDecl when not in C++!");
+ if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
+ if (isa<EnumConstantDecl>(PrevDecl))
+ Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
+ else
+ Diag(IdLoc, diag::err_redefinition) << Id;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ if (Val) Val->Destroy(Context);
+ return DeclPtrTy();
+ }
+ }
+
+ EnumConstantDecl *New = CheckEnumConstant(TheEnumDecl, LastEnumConst,
+ IdLoc, Id, Owned(Val));
+
+ // Register this decl in the current scope stack.
+ if (New)
+ PushOnScopeChains(New, S);
+
+ return DeclPtrTy::make(New);
+}
+
+void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, DeclPtrTy EnumDeclX,
+ DeclPtrTy *Elements, unsigned NumElements) {
+ EnumDecl *Enum = cast<EnumDecl>(EnumDeclX.getAs<Decl>());
+ QualType EnumType = Context.getTypeDeclType(Enum);
+
+ // TODO: If the result value doesn't fit in an int, it must be a long or long
+ // long value. ISO C does not support this, but GCC does as an extension;
+ // emit a warning.
+ unsigned IntWidth = Context.Target.getIntWidth();
+
+ // Verify that all the values are okay, compute the size of the values, and
+ // reverse the list.
+ unsigned NumNegativeBits = 0;
+ unsigned NumPositiveBits = 0;
+
+ // Keep track of whether all elements have type int.
+ bool AllElementsInt = true;
+
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i].getAs<Decl>());
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ // If the enum value doesn't fit in an int, emit an extension warning.
+ const llvm::APSInt &InitVal = ECD->getInitVal();
+ assert(InitVal.getBitWidth() >= IntWidth &&
+ "Should have promoted value to int");
+ if (InitVal.getBitWidth() > IntWidth) {
+ llvm::APSInt V(InitVal);
+ V.trunc(IntWidth);
+ V.extend(InitVal.getBitWidth());
+ if (V != InitVal)
+ Diag(ECD->getLocation(), diag::ext_enum_value_not_int)
+ << InitVal.toString(10);
+ }
+
+ // Keep track of the size of positive and negative values.
+ if (InitVal.isUnsigned() || InitVal.isNonNegative())
+ NumPositiveBits = std::max(NumPositiveBits,
+ (unsigned)InitVal.getActiveBits());
+ else
+ NumNegativeBits = std::max(NumNegativeBits,
+ (unsigned)InitVal.getMinSignedBits());
+
+ // Keep track of whether every enum element has type int (very common).
+ if (AllElementsInt)
+ AllElementsInt = ECD->getType() == Context.IntTy;
+ }
+
+ // Figure out the type that should be used for this enum.
+ // FIXME: Support attribute(packed) on enums and -fshort-enums.
+ QualType BestType;
+ unsigned BestWidth;
+
+ if (NumNegativeBits) {
+ // If there is a negative value, figure out the smallest integer type (of
+ // int/long/longlong) that fits.
+ if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
+ BestType = Context.IntTy;
+ BestWidth = IntWidth;
+ } else {
+ BestWidth = Context.Target.getLongWidth();
+
+ if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth)
+ BestType = Context.LongTy;
+ else {
+ BestWidth = Context.Target.getLongLongWidth();
+
+ if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
+ Diag(Enum->getLocation(), diag::warn_enum_too_large);
+ BestType = Context.LongLongTy;
+ }
+ }
+ } else {
+ // If there is no negative value, figure out which of uint, ulong, ulonglong
+ // fits.
+ if (NumPositiveBits <= IntWidth) {
+ BestType = Context.UnsignedIntTy;
+ BestWidth = IntWidth;
+ } else if (NumPositiveBits <=
+ (BestWidth = Context.Target.getLongWidth())) {
+ BestType = Context.UnsignedLongTy;
+ } else {
+ BestWidth = Context.Target.getLongLongWidth();
+ assert(NumPositiveBits <= BestWidth &&
+ "How could an initializer get larger than ULL?");
+ BestType = Context.UnsignedLongLongTy;
+ }
+ }
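+
+ // For example, "enum { A = -1, B = 0x80000000 }" on a target with 32-bit
+ // int gives NumNegativeBits == 1 and NumPositiveBits == 32, so the logic
+ // above selects 'long' (or 'long long' on targets where long is 32 bits).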
+
+ // Loop over all of the enumerator constants, changing their types to match
+ // the type of the enum if needed.
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i].getAs<Decl>());
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ // Standard C says the enumerators have int type, but we allow, as an
+ // extension, the enumerators to be larger than int size. If each
+ // enumerator value fits in an int, type it as an int; otherwise give it the
+ // same type as the enum decl itself. This means that in "enum { X = 1U }"
+ // X has type 'int', not 'unsigned'.
+ if (ECD->getType() == Context.IntTy) {
+ // Make sure the init value is signed.
+ llvm::APSInt IV = ECD->getInitVal();
+ IV.setIsSigned(true);
+ ECD->setInitVal(IV);
+
+ if (getLangOptions().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ continue; // Already int type.
+ }
+
+ // Determine whether the value fits into an int.
+ llvm::APSInt InitVal = ECD->getInitVal();
+ bool FitsInInt;
+ if (InitVal.isUnsigned() || !InitVal.isNegative())
+ FitsInInt = InitVal.getActiveBits() < IntWidth;
+ else
+ FitsInInt = InitVal.getMinSignedBits() <= IntWidth;
+
+ // If it fits into an integer type, force it. Otherwise force it to match
+ // the enum decl type.
+ QualType NewTy;
+ unsigned NewWidth;
+ bool NewSign;
+ if (FitsInInt) {
+ NewTy = Context.IntTy;
+ NewWidth = IntWidth;
+ NewSign = true;
+ } else if (ECD->getType() == BestType) {
+ // Already the right type!
+ if (getLangOptions().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ continue;
+ } else {
+ NewTy = BestType;
+ NewWidth = BestWidth;
+ NewSign = BestType->isSignedIntegerType();
+ }
+
+ // Adjust the APSInt value.
+ InitVal.extOrTrunc(NewWidth);
+ InitVal.setIsSigned(NewSign);
+ ECD->setInitVal(InitVal);
+
+ // Adjust the Expr initializer and type.
+ if (ECD->getInitExpr())
+ ECD->setInitExpr(new (Context) ImplicitCastExpr(NewTy, ECD->getInitExpr(),
+ /*isLvalue=*/false));
+ if (getLangOptions().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ else
+ ECD->setType(NewTy);
+ }
+
+ Enum->completeDefinition(Context, BestType);
+}
+
+Sema::DeclPtrTy Sema::ActOnFileScopeAsmDecl(SourceLocation Loc,
+ ExprArg expr) {
+ StringLiteral *AsmString = cast<StringLiteral>(expr.takeAs<Expr>());
+
+ FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext,
+ Loc, AsmString);
+ CurContext->addDecl(Context, New);
+ return DeclPtrTy::make(New);
+}
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
new file mode 100644
index 0000000..99b4d77
--- /dev/null
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -0,0 +1,1803 @@
+//===--- SemaDeclAttr.cpp - Declaration Attribute Handling ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements decl-related attribute processing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Parse/DeclSpec.h"
+#include <llvm/ADT/StringExtras.h>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+//===----------------------------------------------------------------------===//
+
+static const FunctionType *getFunctionType(Decl *d, bool blocksToo = true) {
+ QualType Ty;
+ if (ValueDecl *decl = dyn_cast<ValueDecl>(d))
+ Ty = decl->getType();
+ else if (FieldDecl *decl = dyn_cast<FieldDecl>(d))
+ Ty = decl->getType();
+ else if (TypedefDecl* decl = dyn_cast<TypedefDecl>(d))
+ Ty = decl->getUnderlyingType();
+ else
+ return 0;
+
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAsPointerType()->getPointeeType();
+ else if (blocksToo && Ty->isBlockPointerType())
+ Ty = Ty->getAsBlockPointerType()->getPointeeType();
+
+ return Ty->getAsFunctionType();
+}
+
+// FIXME: We should provide an abstraction around a method or function
+// to provide the following bits of information.
+
+/// isFunctionOrMethod - Return true if the given decl has function
+/// type (function or function-typed variable) or an Objective-C
+/// method.
+static bool isFunctionOrMethod(Decl *d) {
+ return getFunctionType(d, false) || isa<ObjCMethodDecl>(d);
+}
+
+/// isFunctionOrMethodOrBlock - Return true if the given decl has function
+/// type (function or function-typed variable) or an Objective-C
+/// method or a block.
+static bool isFunctionOrMethodOrBlock(Decl *d) {
+ if (isFunctionOrMethod(d))
+ return true;
+ // Checking for a block is more involved.
+ if (const VarDecl *V = dyn_cast<VarDecl>(d)) {
+ QualType Ty = V->getType();
+ return Ty->isBlockPointerType();
+ }
+ return isa<BlockDecl>(d);
+}
+
+/// hasFunctionProto - Return true if the given decl has argument (prototype)
+/// information. This decl should have already passed
+/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
+static bool hasFunctionProto(Decl *d) {
+ if (const FunctionType *FnTy = getFunctionType(d))
+ return isa<FunctionProtoType>(FnTy);
+ else {
+ assert(isa<ObjCMethodDecl>(d) || isa<BlockDecl>(d));
+ return true;
+ }
+}
+
+/// getFunctionOrMethodNumArgs - Return number of function or method
+/// arguments. It is an error to call this on a K&R function (use
+/// hasFunctionProto first).
+static unsigned getFunctionOrMethodNumArgs(Decl *d) {
+ if (const FunctionType *FnTy = getFunctionType(d))
+ return cast<FunctionProtoType>(FnTy)->getNumArgs();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ return BD->getNumParams();
+ return cast<ObjCMethodDecl>(d)->param_size();
+}
+
+static QualType getFunctionOrMethodArgType(Decl *d, unsigned Idx) {
+ if (const FunctionType *FnTy = getFunctionType(d))
+ return cast<FunctionProtoType>(FnTy)->getArgType(Idx);
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ return BD->getParamDecl(Idx)->getType();
+
+ return cast<ObjCMethodDecl>(d)->param_begin()[Idx]->getType();
+}
+
+static QualType getFunctionOrMethodResultType(Decl *d) {
+ if (const FunctionType *FnTy = getFunctionType(d))
+ return cast<FunctionProtoType>(FnTy)->getResultType();
+ return cast<ObjCMethodDecl>(d)->getResultType();
+}
+
+static bool isFunctionOrMethodVariadic(Decl *d) {
+ if (const FunctionType *FnTy = getFunctionType(d)) {
+ const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
+ return proto->isVariadic();
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(d))
+ return BD->IsVariadic();
+ else {
+ return cast<ObjCMethodDecl>(d)->isVariadic();
+ }
+}
+
+static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+ const PointerType *PT = T->getAsPointerType();
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceType *ClsT =PT->getPointeeType()->getAsObjCInterfaceType();
+ if (!ClsT)
+ return false;
+
+ IdentifierInfo* ClsName = ClsT->getDecl()->getIdentifier();
+
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &Ctx.Idents.get("NSString") ||
+ ClsName == &Ctx.Idents.get("NSMutableString");
+}
+
+static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
+ const PointerType *PT = T->getAsPointerType();
+ if (!PT)
+ return false;
+
+ const RecordType *RT = PT->getPointeeType()->getAsRecordType();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->getTagKind() != TagDecl::TK_struct)
+ return false;
+
+ return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Implementations
+//===----------------------------------------------------------------------===//
+
+// FIXME: All this manual attribute parsing code is gross. At the
+// least add some helper functions to check most argument patterns (#
+// and types of args).
+
+static void HandleExtVectorTypeAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ TypedefDecl *tDecl = dyn_cast<TypedefDecl>(d);
+ if (tDecl == 0) {
+ S.Diag(Attr.getLoc(), diag::err_typecheck_ext_vector_not_typedef);
+ return;
+ }
+
+ QualType curType = tDecl->getUnderlyingType();
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ Expr *sizeExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt vecSize(32);
+ if (!sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "ext_vector_type" << sizeExpr->getSourceRange();
+ return;
+ }
+ // unlike gcc's vector_size attribute, we do not allow vectors to be defined
+ // in conjunction with complex types (pointers, arrays, functions, etc.).
+ if (!curType->isIntegerType() && !curType->isRealFloatingType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << curType;
+ return;
+ }
+ // unlike gcc's vector_size attribute, the size is specified as the
+ // number of elements, not the number of bytes.
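+ // For example (illustrative):
+ //   typedef float float4 __attribute__((ext_vector_type(4)));
+ // declares a vector of four floats (16 bytes), whereas vector_size(4) would
+ // mean four bytes.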
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
+
+ if (vectorSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_zero_size)
+ << sizeExpr->getSourceRange();
+ return;
+ }
+ // Instantiate/Install the vector type, the number of elements is > 0.
+ tDecl->setUnderlyingType(S.Context.getExtVectorType(curType, vectorSize));
+ // Remember this typedef decl, we will need it later for diagnostics.
+ S.ExtVectorDecls.push_back(tDecl);
+}
+
+
+/// HandleVectorSizeAttr - This attribute is only applicable to
+/// integral and float scalars, although arrays, pointers, and function
+/// return values are allowed in conjunction with this construct. Aggregates
+/// with this attribute are invalid, even if they are of the same size as a
+/// corresponding scalar.
+/// The raw attribute should contain precisely 1 argument, the vector size
+/// for the variable, measured in bytes. If curType and rawAttr are well
+/// formed, this routine will return a new vector type.
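+/// For example (illustrative):
+///   typedef int v4si __attribute__((vector_size(16)));
+/// declares v4si as a 16-byte vector of ints (four elements where int is
+/// 32 bits wide).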
+static void HandleVectorSizeAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ QualType CurType;
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ CurType = VD->getType();
+ else if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D))
+ CurType = TD->getUnderlyingType();
+ else {
+ S.Diag(D->getLocation(), diag::err_attr_wrong_decl)
+ << "vector_size" << SourceRange(Attr.getLoc(), Attr.getLoc());
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ Expr *sizeExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt vecSize(32);
+ if (!sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "vector_size" << sizeExpr->getSourceRange();
+ return;
+ }
+ // navigate to the base type - we need to provide for vector pointers,
+ // vector arrays, and functions returning vectors.
+ if (CurType->isPointerType() || CurType->isArrayType() ||
+ CurType->isFunctionType()) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_vector_size) << CurType;
+ return;
+ /* FIXME: rebuild the type from the inside out, vectorizing the inner type.
+ do {
+ if (PointerType *PT = dyn_cast<PointerType>(canonType))
+ canonType = PT->getPointeeType().getTypePtr();
+ else if (ArrayType *AT = dyn_cast<ArrayType>(canonType))
+ canonType = AT->getElementType().getTypePtr();
+ else if (FunctionType *FT = dyn_cast<FunctionType>(canonType))
+ canonType = FT->getResultType().getTypePtr();
+ } while (canonType->isPointerType() || canonType->isArrayType() ||
+ canonType->isFunctionType());
+ */
+ }
+ // the base type must be integer or float, and can't already be a vector.
+ if (CurType->isVectorType() ||
+ (!CurType->isIntegerType() && !CurType->isRealFloatingType())) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
+ return;
+ }
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ // vecSize is specified in bytes - convert to bits.
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue() * 8);
+
+ // the vector size needs to be an integral multiple of the type size.
+ if (vectorSize % typeSize) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
+ << sizeExpr->getSourceRange();
+ return;
+ }
+ if (vectorSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_zero_size)
+ << sizeExpr->getSourceRange();
+ return;
+ }
+
+ // Success! Instantiate the vector type, the number of elements is > 0, and
+ // not required to be a power of 2, unlike GCC.
+ CurType = S.Context.getVectorType(CurType, vectorSize/typeSize);
+
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ VD->setType(CurType);
+ else
+ cast<TypedefDecl>(D)->setUnderlyingType(CurType);
+}
+
+static void HandlePackedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (TagDecl *TD = dyn_cast<TagDecl>(d))
+ TD->addAttr(::new (S.Context) PackedAttr(1));
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(d)) {
+ // If the alignment is less than or equal to 8 bits, the packed attribute
+ // has no effect.
+ if (!FD->getType()->isIncompleteType() &&
+ S.Context.getTypeAlign(FD->getType()) <= 8)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
+ << Attr.getName() << FD->getType();
+ else
+ FD->addAttr(::new (S.Context) PackedAttr(1));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
+
+static void HandleIBOutletAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // The IBOutlet attribute only applies to instance variables of Objective-C
+ // classes.
+ if (isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d))
+ d->addAttr(::new (S.Context) IBOutletAttr());
+ else
+ S.Diag(Attr.getLoc(), diag::err_attribute_iboutlet);
+}
+
+static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
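+ // Example usage (illustrative):
+ //   void *my_memcpy(void *dst, const void *src, unsigned n)
+ //       __attribute__((nonnull(1, 2)));
+ // With no argument list, __attribute__((nonnull)) applies to every pointer
+ // parameter; that case is handled at the end of this function.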
+ // GCC ignores the nonnull attribute on K&R style function
+ // prototypes, so we ignore it as well
+ if (!isFunctionOrMethod(d) || !hasFunctionProto(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+
+ // The nonnull attribute only applies to pointers.
+ llvm::SmallVector<unsigned, 10> NonNullArgs;
+
+ for (AttributeList::arg_iterator I=Attr.arg_begin(),
+ E=Attr.arg_end(); I!=E; ++I) {
+
+ // The argument must be an integer constant expression.
+ Expr *Ex = static_cast<Expr *>(*I);
+ llvm::APSInt ArgNum(32);
+ if (!Ex->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "nonnull" << Ex->getSourceRange();
+ return;
+ }
+
+ unsigned x = (unsigned) ArgNum.getZExtValue();
+
+ if (x < 1 || x > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "nonnull" << I.getArgNum() << Ex->getSourceRange();
+ return;
+ }
+
+ --x;
+
+ // Is the function argument a pointer type?
+ QualType T = getFunctionOrMethodArgType(d, x);
+ if (!T->isPointerType() && !T->isBlockPointerType()) {
+ // FIXME: Should also highlight argument in decl.
+ S.Diag(Attr.getLoc(), diag::err_nonnull_pointers_only)
+ << "nonnull" << Ex->getSourceRange();
+ continue;
+ }
+
+ NonNullArgs.push_back(x);
+ }
+
+ // If no arguments were specified to __attribute__((nonnull)) then all
+ // pointer arguments have a nonnull attribute.
+ if (NonNullArgs.empty()) {
+ for (unsigned I = 0, E = getFunctionOrMethodNumArgs(d); I != E; ++I) {
+ QualType T = getFunctionOrMethodArgType(d, I);
+ if (T->isPointerType() || T->isBlockPointerType())
+ NonNullArgs.push_back(I);
+ }
+
+ if (NonNullArgs.empty()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
+ return;
+ }
+ }
+
+ unsigned* start = &NonNullArgs[0];
+ unsigned size = NonNullArgs.size();
+ std::sort(start, start + size);
+ d->addAttr(::new (S.Context) NonNullAttr(start, size));
+}
+
+static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
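+ // Example usage (illustrative): 'void f(void) __attribute__((alias("g")));'
+ // makes f an alias for the symbol named "g".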
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (Str == 0 || Str->isWide()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "alias" << 1;
+ return;
+ }
+
+ const char *Alias = Str->getStrData();
+ unsigned AliasLen = Str->getByteLength();
+
+ // FIXME: check if target symbol exists in current file
+
+ d->addAttr(::new (S.Context) AliasAttr(std::string(Alias, AliasLen)));
+}
+
+static void HandleAlwaysInlineAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) AlwaysInlineAttr());
+}
+
+static bool HandleCommonNoReturnAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return false;
+ }
+
+ if (!isFunctionOrMethod(d) && !isa<BlockDecl>(d)) {
+ ValueDecl *VD = dyn_cast<ValueDecl>(d);
+ if (VD == 0 || !VD->getType()->isBlockPointerType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void HandleNoReturnAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (HandleCommonNoReturnAttr(d, Attr, S))
+ d->addAttr(::new (S.Context) NoReturnAttr());
+}
+
+static void HandleAnalyzerNoReturnAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ if (HandleCommonNoReturnAttr(d, Attr, S))
+ d->addAttr(::new (S.Context) AnalyzerNoReturnAttr());
+}
+
+static void HandleUnusedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(d) && !isFunctionOrMethod(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) UnusedAttr());
+}
+
+static void HandleUsedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(d)) {
+ if (VD->hasLocalStorage() || VD->hasExternalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "used";
+ return;
+ }
+ } else if (!isFunctionOrMethod(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) UsedAttr());
+}
+
+static void HandleConstructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0 && Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << "0 or 1";
+ return;
+ }
+
+ int priority = 65535; // FIXME: Do not hardcode such constants.
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Idx(32);
+ if (!E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "constructor" << 1 << E->getSourceRange();
+ return;
+ }
+ priority = Idx.getZExtValue();
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) ConstructorAttr(priority));
+}
+
+static void HandleDestructorAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0 && Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << "0 or 1";
+ return;
+ }
+
+ int priority = 65535; // FIXME: Do not hardcode such constants.
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Idx(32);
+ if (!E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "destructor" << 1 << E->getSourceRange();
+ return;
+ }
+ priority = Idx.getZExtValue();
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) DestructorAttr(priority));
+}
+
+static void HandleDeprecatedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) DeprecatedAttr());
+}
+
+static void HandleUnavailableAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) UnavailableAttr());
+}
+
+static void HandleVisibilityAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (Str == 0 || Str->isWide()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "visibility" << 1;
+ return;
+ }
+
+ const char *TypeStr = Str->getStrData();
+ unsigned TypeLen = Str->getByteLength();
+ VisibilityAttr::VisibilityTypes type;
+
+ if (TypeLen == 7 && !memcmp(TypeStr, "default", 7))
+ type = VisibilityAttr::DefaultVisibility;
+ else if (TypeLen == 6 && !memcmp(TypeStr, "hidden", 6))
+ type = VisibilityAttr::HiddenVisibility;
+ else if (TypeLen == 8 && !memcmp(TypeStr, "internal", 8))
+ type = VisibilityAttr::HiddenVisibility; // FIXME
+ else if (TypeLen == 9 && !memcmp(TypeStr, "protected", 9))
+ type = VisibilityAttr::ProtectedVisibility;
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_unknown_visibility) << TypeStr;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) VisibilityAttr(type));
+}
+
+static void HandleObjCExceptionAttr(Decl *D, const AttributeList &Attr,
+ Sema &S) {
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ ObjCInterfaceDecl *OCI = dyn_cast<ObjCInterfaceDecl>(D);
+ if (OCI == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_requires_objc_interface);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCExceptionAttr());
+}
+
+static void HandleObjCNSObject(Decl *D, const AttributeList &Attr, Sema &S) {
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isPointerType() ||
+ !T->getAsPointerType()->getPointeeType()->isRecordType()) {
+ S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ }
+ D->addAttr(::new (S.Context) ObjCNSObjectAttr());
+}
+
+static void
+HandleOverloadableAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_overloadable_not_function);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) OverloadableAttr());
+}
+
+static void HandleBlocksAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "blocks" << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ BlocksAttr::BlocksAttrTypes type;
+ if (Attr.getParameterName()->isStr("byref"))
+ type = BlocksAttr::ByRef;
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "blocks" << Attr.getParameterName();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) BlocksAttr(type));
+}
+
+static void HandleSentinelAttr(Decl *d, const AttributeList &Attr, Sema &S) {
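+ // The sentinel attribute marks a variadic function, method, or block whose
+ // argument list must be terminated by a NULL, e.g. (illustrative):
+ //   void my_execl(const char *path, const char *arg, ...)
+ //       __attribute__((sentinel));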
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << "0, 1 or 2";
+ return;
+ }
+
+ int sentinel = 0;
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Idx(32);
+ if (!E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "sentinel" << 1 << E->getSourceRange();
+ return;
+ }
+ sentinel = Idx.getZExtValue();
+
+ if (sentinel < 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_less_than_zero)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ int nullPos = 0;
+ if (Attr.getNumArgs() > 1) {
+ Expr *E = static_cast<Expr *>(Attr.getArg(1));
+ llvm::APSInt Idx(32);
+ if (!E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "sentinel" << 2 << E->getSourceRange();
+ return;
+ }
+ nullPos = Idx.getZExtValue();
+
+ if (nullPos > 1 || nullPos < 0) {
+ // FIXME: This error message could be improved, it would be nice
+ // to say what the bounds actually are.
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(d)) {
+ const FunctionType *FT = FD->getType()->getAsFunctionType();
+ assert(FT && "FunctionDecl has non-function type?");
+
+ if (isa<FunctionNoProtoType>(FT)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_named_arguments);
+ return;
+ }
+
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d)) {
+ if (!MD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (isa<BlockDecl>(d)) {
+ // Note! BlockDecl is typeless. Variadic diagnostics
+ // will be issued by the caller.
+ ;
+ } else if (const VarDecl *V = dyn_cast<VarDecl>(d)) {
+ QualType Ty = V->getType();
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
+ const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(d)
+ : Ty->getAsBlockPointerType()->getPointeeType()->getAsFunctionType();
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ int m = Ty->isFunctionPointerType() ? 0 : 1;
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
+ return;
+ }
+ }
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 6 /*function, method or block */;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 6 /*function, method or block */;
+ return;
+ }
+ d->addAttr(::new (S.Context) SentinelAttr(sentinel, nullPos));
+}
+
+static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // TODO: could also be applied to methods?
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(D);
+ if (!Fn) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ Fn->addAttr(::new (S.Context) WarnUnusedResultAttr());
+}
+
+static void HandleWeakAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // TODO: could also be applied to methods?
+ if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) WeakAttr());
+}
+
+static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // weak_import only applies to variable & function declarations.
+ bool isDef = false;
+ if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ isDef = (!VD->hasExternalStorage() || VD->getInit());
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ isDef = FD->getBody(S.Context);
+ } else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ // We ignore weak import on properties and methods
+ return;
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ // Merge should handle any subsequent violations.
+ if (isDef) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_weak_import_invalid_on_definition)
+ << "weak_import" << 2 /*variable and function*/;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) WeakImportAttr());
+}
+
+static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // Attribute can be applied only to functions or variables.
+ if (isa<VarDecl>(D)) {
+ D->addAttr(::new (S.Context) DLLImportAttr());
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ // Currently, the dllimport attribute is ignored for inlined functions;
+ // a warning is emitted.
+ if (FD->isInline()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+
+ // The attribute is also overridden by a subsequent declaration as dllexport;
+ // a warning is emitted.
+ for (AttributeList *nextAttr = Attr.getNext(); nextAttr;
+ nextAttr = nextAttr->getNext()) {
+ if (nextAttr->getKind() == AttributeList::AT_dllexport) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+ }
+
+ if (D->getAttr<DLLExportAttr>()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) DLLImportAttr());
+}
+
+static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // Attribute can be applied only to functions or variables.
+ if (isa<VarDecl>(D)) {
+ D->addAttr(::new (S.Context) DLLExportAttr());
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ // Currently, the dllexport attribute is ignored for inlined functions;
+ // a warning is emitted.
+ if (FD->isInline()) {
+ // FIXME: ... unless the -fkeep-inline-functions flag has been used.
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllexport";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) DLLExportAttr());
+}
+
+static void HandleSectionAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // The section attribute takes exactly one string argument.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ // Make sure that there is a string literal as the section's single
+ // argument.
+ StringLiteral *SE =
+ dyn_cast<StringLiteral>(static_cast<Expr *>(Attr.getArg(0)));
+ if (!SE) {
+ // FIXME
+ S.Diag(Attr.getLoc(), diag::err_attribute_annotate_no_string);
+ return;
+ }
+ D->addAttr(::new (S.Context) SectionAttr(std::string(SE->getStrData(),
+ SE->getByteLength())));
+}
+
+static void HandleStdCallAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // Attribute has no arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // Attribute can be applied only to functions.
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ // stdcall and fastcall attributes are mutually incompatible.
+ if (d->getAttr<FastCallAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "stdcall" << "fastcall";
+ return;
+ }
+
+ d->addAttr(::new (S.Context) StdCallAttr());
+}
+
+static void HandleFastCallAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // Attribute has no arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ // stdcall and fastcall attributes are mutually incompatible.
+ if (d->getAttr<StdCallAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "fastcall" << "stdcall";
+ return;
+ }
+
+ d->addAttr(::new (S.Context) FastCallAttr());
+}
+
+static void HandleNothrowAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NoThrowAttr());
+}
+
+static void HandleConstAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) ConstAttr());
+}
+
+static void HandlePureAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) PureAttr());
+}
+
+static void HandleCleanupAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // Match gcc which ignores cleanup attrs when compiling C++.
+ if (S.getLangOptions().CPlusPlus)
+ return;
+
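+ // Illustrative usage:
+ //   void cleanup_int(int *p);
+ //   void g(void) { int x __attribute__((cleanup(cleanup_int))) = 0; }
+ // The cleanup function is called with a pointer to the variable when it goes
+ // out of scope, which is why the check below compares its parameter type
+ // against a pointer to the variable's type.
+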
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ VarDecl *VD = dyn_cast<VarDecl>(d);
+
+ if (!VD || !VD->hasLocalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "cleanup";
+ return;
+ }
+
+ // Look up the function
+ NamedDecl *CleanupDecl = S.LookupName(S.TUScope, Attr.getParameterName(),
+ Sema::LookupOrdinaryName);
+ if (!CleanupDecl) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_arg_not_found) <<
+ Attr.getParameterName();
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(CleanupDecl);
+ if (!FD) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_arg_not_function) <<
+ Attr.getParameterName();
+ return;
+ }
+
+ if (FD->getNumParams() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_cleanup_func_must_take_one_arg) <<
+ Attr.getParameterName();
+ return;
+ }
+
+ // We're currently more strict than GCC about what function types we accept.
+ // If this ever proves to be a problem it should be easy to fix.
+ QualType Ty = S.Context.getPointerType(VD->getType());
+ QualType ParamTy = FD->getParamDecl(0)->getType();
+ if (S.CheckAssignmentConstraints(ParamTy, Ty) != Sema::Compatible) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_cleanup_func_arg_incompatible_type) <<
+ Attr.getParameterName() << ParamTy << Ty;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) CleanupAttr(FD));
+}
+
+/// Handle __attribute__((format_arg(idx))) attribute
+/// based on http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
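+///
+/// For example (illustrative):
+///   char *my_dgettext(char *domain, const char *fmt)
+///       __attribute__((format_arg(2)));
+/// states that the result is a format string derived from argument 2.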
+static void HandleFormatArgAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ if (!isFunctionOrMethod(d) || !hasFunctionProto(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+ // FIXME: In C++ the implicit 'this' function parameter also counts;
+ // this is needed in order to be compatible with GCC.
+ // The index must start at 1.
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+ unsigned FirstIdx = 1;
+ // checks for the 2nd argument
+ Expr *IdxExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Idx(32);
+ if (!IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ unsigned ArgIdx = Idx.getZExtValue() - 1;
+
+ // make sure the format string is really a string
+ QualType Ty = getFunctionOrMethodArgType(d, ArgIdx);
+
+ bool not_nsstring_type = !isNSStringType(Ty, S.Context);
+ if (not_nsstring_type &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAsPointerType()->getPointeeType()->isCharType())) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << (not_nsstring_type ? "a string type" : "an NSString")
+ << IdxExpr->getSourceRange();
+ return;
+ }
+ Ty = getFunctionOrMethodResultType(d);
+ if (!isNSStringType(Ty, S.Context) &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAsPointerType()->getPointeeType()->isCharType())) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
+ << (not_nsstring_type ? "string type" : "NSString")
+ << IdxExpr->getSourceRange();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) FormatArgAttr(Idx.getZExtValue()));
+}
+
+/// Handle __attribute__((format(type,idx,firstarg))) attributes
+/// based on http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
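+///
+/// For example (illustrative):
+///   int my_printf(void *obj, const char *fmt, ...)
+///       __attribute__((format(printf, 2, 3)));
+/// says that argument 2 is a printf-style format string and the arguments to
+/// check against it start at argument 3.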
+static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "format" << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 3;
+ return;
+ }
+
+ if (!isFunctionOrMethodOrBlock(d) || !hasFunctionProto(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ // FIXME: In C++ the implicit 'this' function parameter also counts;
+ // this is needed in order to be compatible with GCC.
+ // The index must start at 1 and the limit is NumArgs+1.
+ unsigned NumArgs = getFunctionOrMethodNumArgs(d);
+ unsigned FirstIdx = 1;
+
+ const char *Format = Attr.getParameterName()->getName();
+ unsigned FormatLen = Attr.getParameterName()->getLength();
+
+ // Normalize the argument, __foo__ becomes foo.
+ if (FormatLen > 4 && Format[0] == '_' && Format[1] == '_' &&
+ Format[FormatLen - 2] == '_' && Format[FormatLen - 1] == '_') {
+ Format += 2;
+ FormatLen -= 4;
+ }
+
+ bool Supported = false;
+ bool is_NSString = false;
+ bool is_strftime = false;
+ bool is_CFString = false;
+
+ switch (FormatLen) {
+ default: break;
+ case 5: Supported = !memcmp(Format, "scanf", 5); break;
+ case 6: Supported = !memcmp(Format, "printf", 6); break;
+ case 7: Supported = !memcmp(Format, "strfmon", 7); break;
+ case 8:
+ Supported = (is_strftime = !memcmp(Format, "strftime", 8)) ||
+ (is_NSString = !memcmp(Format, "NSString", 8)) ||
+ (is_CFString = !memcmp(Format, "CFString", 8));
+ break;
+ }
+
+ if (!Supported) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "format" << Attr.getParameterName()->getName();
+ return;
+ }
+
+ // checks for the 2nd argument
+ Expr *IdxExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Idx(32);
+ if (!IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // FIXME: Do we need to bounds check?
+ unsigned ArgIdx = Idx.getZExtValue() - 1;
+
+ // make sure the format string is really a string
+ QualType Ty = getFunctionOrMethodArgType(d, ArgIdx);
+
+ if (is_CFString) {
+ if (!isCFStringType(Ty, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a CFString" << IdxExpr->getSourceRange();
+ return;
+ }
+ } else if (is_NSString) {
+ // FIXME: do we need to check if the type is NSString*? What are the
+ // semantics?
+ if (!isNSStringType(Ty, S.Context)) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "an NSString" << IdxExpr->getSourceRange();
+ return;
+ }
+ } else if (!Ty->isPointerType() ||
+ !Ty->getAsPointerType()->getPointeeType()->isCharType()) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a string type" << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // check the 3rd argument
+ Expr *FirstArgExpr = static_cast<Expr *>(Attr.getArg(1));
+ llvm::APSInt FirstArg(32);
+ if (!FirstArgExpr->isIntegerConstantExpr(FirstArg, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
+
+ // Check that the function is variadic if the 3rd argument is non-zero.
+ if (FirstArg != 0) {
+ if (isFunctionOrMethodVariadic(d)) {
+ ++NumArgs; // +1 for ...
+ } else {
+ S.Diag(d->getLocation(), diag::err_format_attribute_requires_variadic);
+ return;
+ }
+ }
+
+ // strftime requires FirstArg to be 0 because it doesn't read from any
+ // variable; the input is just the current time plus the format string.
+ if (is_strftime) {
+ if (FirstArg != 0) {
+ S.Diag(Attr.getLoc(), diag::err_format_strftime_third_parameter)
+ << FirstArgExpr->getSourceRange();
+ return;
+ }
+ // if 0 it disables parameter checking (to use with e.g. va_list)
+ } else if (FirstArg != 0 && FirstArg != NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) FormatAttr(std::string(Format, FormatLen),
+ Idx.getZExtValue(), FirstArg.getZExtValue()));
+}
+
+static void HandleTransparentUnionAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
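+ // Illustrative usage:
+ //   typedef union { int *ip; float *fp; } U __attribute__((transparent_union));
+ // An argument of type U is passed as if it had the type of its first field,
+ // so every field must have the same size and alignment as the first one
+ // (checked below).
+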
+ // Try to find the underlying union declaration.
+ RecordDecl *RD = 0;
+ TypedefDecl *TD = dyn_cast<TypedefDecl>(d);
+ if (TD && TD->getUnderlyingType()->isUnionType())
+ RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
+ else
+ RD = dyn_cast<RecordDecl>(d);
+
+ if (!RD || !RD->isUnion()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 1 /*union*/;
+ return;
+ }
+
+ if (!RD->isDefinition()) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_transparent_union_attribute_not_definition);
+ return;
+ }
+
+ RecordDecl::field_iterator Field = RD->field_begin(S.Context),
+ FieldEnd = RD->field_end(S.Context);
+ if (Field == FieldEnd) {
+ S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_zero_fields);
+ return;
+ }
+
+ FieldDecl *FirstField = *Field;
+ QualType FirstType = FirstField->getType();
+ if (FirstType->isFloatingType() || FirstType->isVectorType()) {
+ S.Diag(FirstField->getLocation(),
+ diag::warn_transparent_union_attribute_floating);
+ return;
+ }
+
+ uint64_t FirstSize = S.Context.getTypeSize(FirstType);
+ uint64_t FirstAlign = S.Context.getTypeAlign(FirstType);
+ for (; Field != FieldEnd; ++Field) {
+ QualType FieldType = Field->getType();
+ if (S.Context.getTypeSize(FieldType) != FirstSize ||
+ S.Context.getTypeAlign(FieldType) != FirstAlign) {
+ // Warn if we drop the attribute.
+ bool isSize = S.Context.getTypeSize(FieldType) != FirstSize;
+ unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType)
+ : S.Context.getTypeAlign(FieldType);
+ S.Diag(Field->getLocation(),
+ diag::warn_transparent_union_attribute_field_size_align)
+ << isSize << Field->getDeclName() << FieldBits;
+ unsigned FirstBits = isSize? FirstSize : FirstAlign;
+ S.Diag(FirstField->getLocation(),
+ diag::note_transparent_union_first_field_size_align)
+ << isSize << FirstBits;
+ return;
+ }
+ }
+
+ RD->addAttr(::new (S.Context) TransparentUnionAttr());
+}
+
+static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ Expr *argExpr = static_cast<Expr *>(Attr.getArg(0));
+ StringLiteral *SE = dyn_cast<StringLiteral>(argExpr);
+
+ // Make sure that there is a string literal as the annotation's single
+ // argument.
+ if (!SE) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_annotate_no_string);
+ return;
+ }
+ d->addAttr(::new (S.Context) AnnotateAttr(std::string(SE->getStrData(),
+ SE->getByteLength())));
+}
+
+static void HandleAlignedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
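+ // e.g. (illustrative) 'int x __attribute__((aligned(16)));' requests 16-byte
+ // alignment; note that the value stored on the attribute below is in bits.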
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ unsigned Align = 0;
+ if (Attr.getNumArgs() == 0) {
+ // FIXME: This should be the target specific maximum alignment.
+ // (For now we just use 128 bits which is the maximum on X86).
+ Align = 128;
+ d->addAttr(::new (S.Context) AlignedAttr(Align));
+ return;
+ }
+
+ Expr *alignmentExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt Alignment(32);
+ if (!alignmentExpr->isIntegerConstantExpr(Alignment, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "aligned" << alignmentExpr->getSourceRange();
+ return;
+ }
+ if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_aligned_not_power_of_two)
+ << alignmentExpr->getSourceRange();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) AlignedAttr(Alignment.getZExtValue() * 8));
+}
+
+/// HandleModeAttr - This attribute modifies the width of a decl with
+/// primitive type.
+///
+/// Despite what would be logical, the mode attribute is a decl attribute,
+/// not a type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make
+/// 'G' be HImode, not an intermediate pointer.
+///
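+/// A more typical (illustrative) use is narrowing a typedef:
+///   typedef int int16 __attribute__((mode(HI)));  // HImode: 16-bit integer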
+static void HandleModeAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // This attribute isn't documented, but glibc uses it. It changes
+ // the width of an int or unsigned int to the specified size.
+
+ // Check that there aren't any arguments
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ IdentifierInfo *Name = Attr.getParameterName();
+ if (!Name) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_missing_parameter_name);
+ return;
+ }
+ const char *Str = Name->getName();
+ unsigned Len = Name->getLength();
+
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Len > 4 && Str[0] == '_' && Str[1] == '_' &&
+ Str[Len - 2] == '_' && Str[Len - 1] == '_') {
+ Str += 2;
+ Len -= 4;
+ }
+
+ unsigned DestWidth = 0;
+ bool IntegerMode = true;
+ bool ComplexMode = false;
+ switch (Len) {
+ case 2:
+ switch (Str[0]) {
+ case 'Q': DestWidth = 8; break;
+ case 'H': DestWidth = 16; break;
+ case 'S': DestWidth = 32; break;
+ case 'D': DestWidth = 64; break;
+ case 'X': DestWidth = 96; break;
+ case 'T': DestWidth = 128; break;
+ }
+ if (Str[1] == 'F') {
+ IntegerMode = false;
+ } else if (Str[1] == 'C') {
+ IntegerMode = false;
+ ComplexMode = true;
+ } else if (Str[1] != 'I') {
+ DestWidth = 0;
+ }
+ break;
+ case 4:
+ // FIXME: glibc uses 'word' to define register_t; this is narrower than a
+ // pointer on PIC16 and other embedded platforms.
+ if (!memcmp(Str, "word", 4))
+ DestWidth = S.Context.Target.getPointerWidth(0);
+ if (!memcmp(Str, "byte", 4))
+ DestWidth = S.Context.Target.getCharWidth();
+ break;
+ case 7:
+ if (!memcmp(Str, "pointer", 7))
+ DestWidth = S.Context.Target.getPointerWidth(0);
+ break;
+ }
+
+ QualType OldTy;
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D))
+ OldTy = TD->getUnderlyingType();
+ else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ OldTy = VD->getType();
+ else {
+ S.Diag(D->getLocation(), diag::err_attr_wrong_decl)
+ << "mode" << SourceRange(Attr.getLoc(), Attr.getLoc());
+ return;
+ }
+
+ if (!OldTy->getAsBuiltinType() && !OldTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_not_primitive);
+ else if (IntegerMode) {
+ if (!OldTy->isIntegralType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else if (ComplexMode) {
+ if (!OldTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else {
+ if (!OldTy->isFloatingType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ }
+
+ // FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
+ // and friends, at least with glibc.
+ // FIXME: Make sure 32/64-bit integers don't get defined to types of the wrong
+ // width on unusual platforms.
+ // FIXME: Make sure floating-point mappings are accurate
+ // FIXME: Support XF and TF types
+ QualType NewTy;
+ switch (DestWidth) {
+ case 0:
+ S.Diag(Attr.getLoc(), diag::err_unknown_machine_mode) << Name;
+ return;
+ default:
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ case 8:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.SignedCharTy;
+ else
+ NewTy = S.Context.UnsignedCharTy;
+ break;
+ case 16:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.ShortTy;
+ else
+ NewTy = S.Context.UnsignedShortTy;
+ break;
+ case 32:
+ if (!IntegerMode)
+ NewTy = S.Context.FloatTy;
+ else if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.IntTy;
+ else
+ NewTy = S.Context.UnsignedIntTy;
+ break;
+ case 64:
+ if (!IntegerMode)
+ NewTy = S.Context.DoubleTy;
+ else if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.LongLongTy;
+ else
+ NewTy = S.Context.UnsignedLongLongTy;
+ break;
+ case 96:
+ NewTy = S.Context.LongDoubleTy;
+ break;
+ case 128:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ NewTy = S.Context.getFixedWidthIntType(128, OldTy->isSignedIntegerType());
+ break;
+ }
+
+ if (ComplexMode) {
+ NewTy = S.Context.getComplexType(NewTy);
+ }
+
+ // Install the new type.
+ if (TypedefDecl *TD = dyn_cast<TypedefDecl>(D))
+ TD->setUnderlyingType(NewTy);
+ else
+ cast<ValueDecl>(D)->setType(NewTy);
+}
+
+static void HandleNodebugAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isFunctionOrMethod(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NodebugAttr());
+}
+
+static void HandleNoinlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ d->addAttr(::new (S.Context) NoinlineAttr());
+}
+
+static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(d);
+ if (Fn == 0) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ if (!Fn->isInline()) {
+ S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
+ return;
+ }
+
+ d->addAttr(::new (S.Context) GNUInlineAttr());
+}
+
+static void HandleRegparmAttr(Decl *d, const AttributeList &Attr, Sema &S) {
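+ // e.g. (illustrative) 'int my_add(int a, int b) __attribute__((regparm(2)));'
+ // requests that the first two integer arguments be passed in registers on
+ // targets that support it (such as x86).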
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (!isFunctionOrMethod(d)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 0 /*function*/;
+ return;
+ }
+
+ Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt NumParams(32);
+ if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "regparm" << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ if (S.Context.Target.getRegParmMax() == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ if (NumParams.getLimitedValue(255) > S.Context.Target.getRegParmMax()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ << S.Context.Target.getRegParmMax() << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) RegparmAttr(NumParams.getZExtValue()));
+}
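+
+// For illustration: a declaration such as
+//   int sum3(int a, int b, int c) __attribute__((regparm(3)));
+// is accepted above when the requested count does not exceed the target's
+// TargetInfo::getRegParmMax(); larger counts, non-constant arguments, and
+// targets without register parameters are rejected with a diagnostic.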
+
+//===----------------------------------------------------------------------===//
+// Checker-specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
+static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+
+ QualType RetTy;
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d))
+ RetTy = MD->getResultType();
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(d))
+ RetTy = FD->getResultType();
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 3 /* function or method */;
+ return;
+ }
+
+ if (!(S.Context.isObjCNSObjectType(RetTy) || RetTy->getAsPointerType())) {
+ S.Diag(Attr.getLoc(), diag::warn_ns_attribute_wrong_return_type)
+ << Attr.getName();
+ return;
+ }
+
+ switch (Attr.getKind()) {
+ default:
+ assert(0 && "invalid ownership attribute");
+ return;
+ case AttributeList::AT_cf_returns_retained:
+ d->addAttr(::new (S.Context) CFReturnsRetainedAttr());
+ return;
+ case AttributeList::AT_ns_returns_retained:
+ d->addAttr(::new (S.Context) NSReturnsRetainedAttr());
+ return;
+ };
+}
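+
+// Example: these ownership attributes are accepted on functions and
+// Objective-C methods whose return type is an Objective-C object or a
+// pointer, e.g.
+//   NSString *CreateName(void) __attribute__((ns_returns_retained));
+// Other return types produce warn_ns_attribute_wrong_return_type.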
+
+//===----------------------------------------------------------------------===//
+// Top Level Sema Entry Points
+//===----------------------------------------------------------------------===//
+
+/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
+/// the attribute applies to decls. If the attribute is a type attribute, just
+/// silently ignore it.
+static void ProcessDeclAttribute(Decl *D, const AttributeList &Attr, Sema &S) {
+ switch (Attr.getKind()) {
+ case AttributeList::AT_IBOutlet: HandleIBOutletAttr (D, Attr, S); break;
+ case AttributeList::AT_address_space:
+ case AttributeList::AT_objc_gc:
+    // Ignore these; they are type attributes handled by ProcessTypeAttributes.
+ break;
+ case AttributeList::AT_alias: HandleAliasAttr (D, Attr, S); break;
+ case AttributeList::AT_aligned: HandleAlignedAttr (D, Attr, S); break;
+ case AttributeList::AT_always_inline:
+ HandleAlwaysInlineAttr (D, Attr, S); break;
+ case AttributeList::AT_analyzer_noreturn:
+ HandleAnalyzerNoReturnAttr (D, Attr, S); break;
+ case AttributeList::AT_annotate: HandleAnnotateAttr (D, Attr, S); break;
+ case AttributeList::AT_constructor: HandleConstructorAttr(D, Attr, S); break;
+ case AttributeList::AT_deprecated: HandleDeprecatedAttr(D, Attr, S); break;
+ case AttributeList::AT_destructor: HandleDestructorAttr(D, Attr, S); break;
+ case AttributeList::AT_dllexport: HandleDLLExportAttr (D, Attr, S); break;
+ case AttributeList::AT_dllimport: HandleDLLImportAttr (D, Attr, S); break;
+ case AttributeList::AT_ext_vector_type:
+ HandleExtVectorTypeAttr(D, Attr, S);
+ break;
+ case AttributeList::AT_fastcall: HandleFastCallAttr (D, Attr, S); break;
+ case AttributeList::AT_format: HandleFormatAttr (D, Attr, S); break;
+ case AttributeList::AT_format_arg: HandleFormatArgAttr (D, Attr, S); break;
+ case AttributeList::AT_gnu_inline: HandleGNUInlineAttr(D, Attr, S); break;
+ case AttributeList::AT_mode: HandleModeAttr (D, Attr, S); break;
+ case AttributeList::AT_nonnull: HandleNonNullAttr (D, Attr, S); break;
+ case AttributeList::AT_noreturn: HandleNoReturnAttr (D, Attr, S); break;
+ case AttributeList::AT_nothrow: HandleNothrowAttr (D, Attr, S); break;
+
+ // Checker-specific.
+ case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_cf_returns_retained:
+ HandleNSReturnsRetainedAttr(D, Attr, S); break;
+
+ case AttributeList::AT_packed: HandlePackedAttr (D, Attr, S); break;
+ case AttributeList::AT_section: HandleSectionAttr (D, Attr, S); break;
+ case AttributeList::AT_stdcall: HandleStdCallAttr (D, Attr, S); break;
+ case AttributeList::AT_unavailable: HandleUnavailableAttr(D, Attr, S); break;
+ case AttributeList::AT_unused: HandleUnusedAttr (D, Attr, S); break;
+ case AttributeList::AT_used: HandleUsedAttr (D, Attr, S); break;
+ case AttributeList::AT_vector_size: HandleVectorSizeAttr(D, Attr, S); break;
+ case AttributeList::AT_visibility: HandleVisibilityAttr(D, Attr, S); break;
+ case AttributeList::AT_warn_unused_result: HandleWarnUnusedResult(D,Attr,S);
+ break;
+ case AttributeList::AT_weak: HandleWeakAttr (D, Attr, S); break;
+ case AttributeList::AT_weak_import: HandleWeakImportAttr(D, Attr, S); break;
+ case AttributeList::AT_transparent_union:
+ HandleTransparentUnionAttr(D, Attr, S);
+ break;
+ case AttributeList::AT_objc_exception:
+ HandleObjCExceptionAttr(D, Attr, S);
+ break;
+ case AttributeList::AT_overloadable:HandleOverloadableAttr(D, Attr, S); break;
+ case AttributeList::AT_nsobject: HandleObjCNSObject (D, Attr, S); break;
+ case AttributeList::AT_blocks: HandleBlocksAttr (D, Attr, S); break;
+ case AttributeList::AT_sentinel: HandleSentinelAttr (D, Attr, S); break;
+ case AttributeList::AT_const: HandleConstAttr (D, Attr, S); break;
+ case AttributeList::AT_pure: HandlePureAttr (D, Attr, S); break;
+ case AttributeList::AT_cleanup: HandleCleanupAttr (D, Attr, S); break;
+ case AttributeList::AT_nodebug: HandleNodebugAttr (D, Attr, S); break;
+ case AttributeList::AT_noinline: HandleNoinlineAttr (D, Attr, S); break;
+ case AttributeList::AT_regparm: HandleRegparmAttr (D, Attr, S); break;
+ case AttributeList::IgnoredAttribute:
+ case AttributeList::AT_no_instrument_function: // Interacts with -pg.
+ // Just ignore
+ break;
+ default:
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ break;
+ }
+}
+
+/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
+/// attribute list to the specified decl, ignoring any type attributes.
+void Sema::ProcessDeclAttributeList(Decl *D, const AttributeList *AttrList) {
+ while (AttrList) {
+ ProcessDeclAttribute(D, *AttrList, *this);
+ AttrList = AttrList->getNext();
+ }
+}
+
+/// ProcessDeclAttributes - Given a declarator (PD) with attributes indicated in
+/// it, apply them to D. This is a bit tricky because PD can have attributes
+/// specified in many different places, and we need to find and apply them all.
+void Sema::ProcessDeclAttributes(Decl *D, const Declarator &PD) {
+ // Apply decl attributes from the DeclSpec if present.
+ if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes())
+ ProcessDeclAttributeList(D, Attrs);
+
+ // Walk the declarator structure, applying decl attributes that were in a type
+ // position to the decl itself. This handles cases like:
+ // int *__attr__(x)** D;
+  // when x is a decl attribute.
+ for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
+ if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs())
+ ProcessDeclAttributeList(D, Attrs);
+
+ // Finally, apply any attributes on the decl itself.
+ if (const AttributeList *Attrs = PD.getAttributes())
+ ProcessDeclAttributeList(D, Attrs);
+}
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
new file mode 100644
index 0000000..f13179f
--- /dev/null
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -0,0 +1,2823 @@
+//===------ SemaDeclCXX.cpp - Semantic Analysis for C++ Declarations ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "SemaInherit.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/DeclSpec.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm> // for std::equal
+#include <map>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// CheckDefaultArgumentVisitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses
+ /// the default argument of a parameter to determine whether it
+ /// contains any ill-formed subexpressions. For example, this will
+ /// diagnose the use of local variables or parameters within the
+ /// default argument expression.
+ class VISIBILITY_HIDDEN CheckDefaultArgumentVisitor
+ : public StmtVisitor<CheckDefaultArgumentVisitor, bool> {
+ Expr *DefaultArg;
+ Sema *S;
+
+ public:
+ CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
+ : DefaultArg(defarg), S(s) {}
+
+ bool VisitExpr(Expr *Node);
+ bool VisitDeclRefExpr(DeclRefExpr *DRE);
+ bool VisitCXXThisExpr(CXXThisExpr *ThisE);
+ };
+
+ /// VisitExpr - Visit all of the children of this expression.
+ bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
+ bool IsInvalid = false;
+ for (Stmt::child_iterator I = Node->child_begin(),
+ E = Node->child_end(); I != E; ++I)
+ IsInvalid |= Visit(*I);
+ return IsInvalid;
+ }
+
+ /// VisitDeclRefExpr - Visit a reference to a declaration, to
+ /// determine whether this declaration can be used in the default
+ /// argument expression.
+ bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) {
+ NamedDecl *Decl = DRE->getDecl();
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p9
+ // Default arguments are evaluated each time the function is
+ // called. The order of evaluation of function arguments is
+ // unspecified. Consequently, parameters of a function shall not
+ // be used in default argument expressions, even if they are not
+ // evaluated. Parameters of a function declared before a default
+ // argument expression are in scope and can hide namespace and
+ // class member names.
+ return S->Diag(DRE->getSourceRange().getBegin(),
+ diag::err_param_default_argument_references_param)
+ << Param->getDeclName() << DefaultArg->getSourceRange();
+ } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p7
+ // Local variables shall not be used in default argument
+ // expressions.
+ if (VDecl->isBlockVarDecl())
+ return S->Diag(DRE->getSourceRange().getBegin(),
+ diag::err_param_default_argument_references_local)
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
+ }
+
+ return false;
+ }
+
+ /// VisitCXXThisExpr - Visit a C++ "this" expression.
+ bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) {
+ // C++ [dcl.fct.default]p8:
+ // The keyword this shall not be used in a default argument of a
+ // member function.
+ return S->Diag(ThisE->getSourceRange().getBegin(),
+ diag::err_param_default_argument_references_this)
+ << ThisE->getSourceRange();
+ }
+}
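+
+// Examples of default arguments the visitor above rejects:
+//   void f(int a, int b = a);                      // refers to parameter 'a'
+//   struct X { int m; void g(int n = this->m); };  // uses 'this'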
+
+/// ActOnParamDefaultArgument - Check whether the default argument
+/// provided for a function parameter is well-formed. If so, attach it
+/// to the parameter declaration.
+void
+Sema::ActOnParamDefaultArgument(DeclPtrTy param, SourceLocation EqualLoc,
+ ExprArg defarg) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(param.getAs<Decl>());
+ ExprOwningPtr<Expr> DefaultArg(this, defarg.takeAs<Expr>());
+ QualType ParamType = Param->getType();
+
+ // Default arguments are only permitted in C++
+ if (!getLangOptions().CPlusPlus) {
+ Diag(EqualLoc, diag::err_param_default_argument)
+ << DefaultArg->getSourceRange();
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // C++ [dcl.fct.default]p5
+ // A default argument expression is implicitly converted (clause
+ // 4) to the parameter type. The default argument expression has
+ // the same semantic constraints as the initializer expression in
+ // a declaration of a variable of the parameter type, using the
+ // copy-initialization semantics (8.5).
+ Expr *DefaultArgPtr = DefaultArg.get();
+ bool DefaultInitFailed = CheckInitializerTypes(DefaultArgPtr, ParamType,
+ EqualLoc,
+ Param->getDeclName(),
+ /*DirectInit=*/false);
+ if (DefaultArgPtr != DefaultArg.get()) {
+ DefaultArg.take();
+ DefaultArg.reset(DefaultArgPtr);
+ }
+ if (DefaultInitFailed) {
+ return;
+ }
+
+ // Check that the default argument is well-formed
+ CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg.get(), this);
+ if (DefaultArgChecker.Visit(DefaultArg.get())) {
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // Okay: add the default argument to the parameter
+ Param->setDefaultArg(DefaultArg.take());
+}
+
+/// ActOnParamUnparsedDefaultArgument - We've seen a default
+/// argument for a function parameter, but we can't parse it yet
+/// because we're inside a class definition. Note that this default
+/// argument will be parsed later.
+void Sema::ActOnParamUnparsedDefaultArgument(DeclPtrTy param,
+ SourceLocation EqualLoc) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(param.getAs<Decl>());
+ if (Param)
+ Param->setUnparsedDefaultArg();
+}
+
+/// ActOnParamDefaultArgumentError - Parsing or semantic analysis of
+/// the default argument for the parameter param failed.
+void Sema::ActOnParamDefaultArgumentError(DeclPtrTy param) {
+ cast<ParmVarDecl>(param.getAs<Decl>())->setInvalidDecl();
+}
+
+/// CheckExtraCXXDefaultArguments - Check for any extra default
+/// arguments in the declarator, which is not a function declaration
+/// or definition and therefore is not permitted to have default
+/// arguments. This routine should be invoked for every declarator
+/// that is not a function declaration or definition.
+void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
+ // C++ [dcl.fct.default]p3
+ // A default argument expression shall be specified only in the
+ // parameter-declaration-clause of a function declaration or in a
+ // template-parameter (14.1). It shall not be specified for a
+ // parameter pack. If it is specified in a
+ // parameter-declaration-clause, it shall not occur within a
+ // declarator or abstract-declarator of a parameter-declaration.
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ if (chunk.Kind == DeclaratorChunk::Function) {
+ for (unsigned argIdx = 0, e = chunk.Fun.NumArgs; argIdx != e; ++argIdx) {
+ ParmVarDecl *Param =
+ cast<ParmVarDecl>(chunk.Fun.ArgInfo[argIdx].Param.getAs<Decl>());
+ if (Param->hasUnparsedDefaultArg()) {
+ CachedTokens *Toks = chunk.Fun.ArgInfo[argIdx].DefaultArgTokens;
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << SourceRange((*Toks)[1].getLocation(), Toks->back().getLocation());
+ delete Toks;
+ chunk.Fun.ArgInfo[argIdx].DefaultArgTokens = 0;
+ } else if (Param->getDefaultArg()) {
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << Param->getDefaultArg()->getSourceRange();
+ Param->setDefaultArg(0);
+ }
+ }
+ }
+ }
+}
+
+// MergeCXXFunctionDecl - Merge two declarations of the same C++
+// function, once we already know that they have the same
+// type. Subroutine of MergeFunctionDecl. Returns true if there was an
+// error, false otherwise.
+bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old) {
+ bool Invalid = false;
+
+ // C++ [dcl.fct.default]p4:
+ //
+ // For non-template functions, default arguments can be added in
+ // later declarations of a function in the same
+ // scope. Declarations in different scopes have completely
+ // distinct sets of default arguments. That is, declarations in
+ // inner scopes do not acquire default arguments from
+ // declarations in outer scopes, and vice versa. In a given
+ // function declaration, all parameters subsequent to a
+ // parameter with a default argument shall have default
+ // arguments supplied in this or previous declarations. A
+ // default argument shall not be redefined by a later
+ // declaration (not even to the same value).
+ for (unsigned p = 0, NumParams = Old->getNumParams(); p < NumParams; ++p) {
+ ParmVarDecl *OldParam = Old->getParamDecl(p);
+ ParmVarDecl *NewParam = New->getParamDecl(p);
+
+    if (OldParam->getDefaultArg() && NewParam->getDefaultArg()) {
+ Diag(NewParam->getLocation(),
+ diag::err_param_default_argument_redefinition)
+ << NewParam->getDefaultArg()->getSourceRange();
+ Diag(OldParam->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ } else if (OldParam->getDefaultArg()) {
+ // Merge the old default argument into the new parameter
+ NewParam->setDefaultArg(OldParam->getDefaultArg());
+ }
+ }
+
+ return Invalid;
+}
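+
+// Example of the merging performed above:
+//   void f(int a, int b = 2);
+//   void f(int a = 1, int b);    // OK: 'b' keeps the default from the
+//                                // earlier declaration
+//   void f(int a, int b = 2);    // error: default argument redefined,
+//                                // even to the same value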
+
+/// CheckCXXDefaultArguments - Verify that the default arguments for a
+/// function declaration are well-formed according to C++
+/// [dcl.fct.default].
+void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
+ unsigned NumParams = FD->getNumParams();
+ unsigned p;
+
+ // Find first parameter with a default argument
+ for (p = 0; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->getDefaultArg())
+ break;
+ }
+
+ // C++ [dcl.fct.default]p4:
+ // In a given function declaration, all parameters
+ // subsequent to a parameter with a default argument shall
+ // have default arguments supplied in this or previous
+ // declarations. A default argument shall not be redefined
+ // by a later declaration (not even to the same value).
+ unsigned LastMissingDefaultArg = 0;
+  for (; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (!Param->getDefaultArg()) {
+ if (Param->isInvalidDecl())
+ /* We already complained about this parameter. */;
+ else if (Param->getIdentifier())
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing_name)
+ << Param->getIdentifier();
+ else
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing);
+
+ LastMissingDefaultArg = p;
+ }
+ }
+
+ if (LastMissingDefaultArg > 0) {
+ // Some default arguments were missing. Clear out all of the
+ // default arguments up to (and including) the last missing
+ // default argument, so that we leave the function parameters
+ // in a semantically valid state.
+ for (p = 0; p <= LastMissingDefaultArg; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->getDefaultArg()) {
+ if (!Param->hasUnparsedDefaultArg())
+ Param->getDefaultArg()->Destroy(Context);
+ Param->setDefaultArg(0);
+ }
+ }
+ }
+}
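+
+// Example of what the check above diagnoses:
+//   void g(int a = 1, int b);    // error: 'b' is missing a default argument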
+
+/// isCurrentClassName - Determine whether the identifier II is the
+/// name of the class type currently being defined. In the case of
+/// nested classes, this will only return true if II is the name of
+/// the innermost class.
+bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
+ const CXXScopeSpec *SS) {
+ CXXRecordDecl *CurDecl;
+ if (SS && SS->isSet() && !SS->isInvalid()) {
+ DeclContext *DC = computeDeclContext(*SS);
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC);
+ } else
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext);
+
+ if (CurDecl)
+ return &II == CurDecl->getIdentifier();
+ else
+ return false;
+}
+
+/// \brief Check the validity of a C++ base class specifier.
+///
+/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
+/// and returns NULL otherwise.
+CXXBaseSpecifier *
+Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ QualType BaseType,
+ SourceLocation BaseLoc) {
+ // C++ [class.union]p1:
+ // A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return 0;
+ }
+
+ if (BaseType->isDependentType())
+ return new CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == RecordDecl::TK_class,
+ Access, BaseType);
+
+ // Base specifiers must be record types.
+ if (!BaseType->isRecordType()) {
+ Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
+ return 0;
+ }
+
+ // C++ [class.union]p1:
+ // A union shall not be used as a base class.
+ if (BaseType->isUnionType()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return 0;
+ }
+
+ // C++ [class.derived]p2:
+ // The class-name in a base-specifier shall not be an incompletely
+ // defined class.
+ if (RequireCompleteType(BaseLoc, BaseType, diag::err_incomplete_base_class,
+ SpecifierRange))
+ return 0;
+
+ // If the base class is polymorphic, the new one is, too.
+ RecordDecl *BaseDecl = BaseType->getAsRecordType()->getDecl();
+ assert(BaseDecl && "Record type has no declaration");
+ BaseDecl = BaseDecl->getDefinition(Context);
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+ if (cast<CXXRecordDecl>(BaseDecl)->isPolymorphic())
+ Class->setPolymorphic(true);
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ Class->setAggregate(false);
+ Class->setPOD(false);
+
+ if (Virtual) {
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if its class has no virtual base classes.
+ Class->setHasTrivialConstructor(false);
+ } else {
+ // C++ [class.ctor]p5:
+ // A constructor is trivial if all the direct base classes of its
+ // class have trivial constructors.
+ Class->setHasTrivialConstructor(cast<CXXRecordDecl>(BaseDecl)->
+ hasTrivialConstructor());
+ }
+
+ // C++ [class.ctor]p3:
+ // A destructor is trivial if all the direct base classes of its class
+ // have trivial destructors.
+ Class->setHasTrivialDestructor(cast<CXXRecordDecl>(BaseDecl)->
+ hasTrivialDestructor());
+
+ // Create the base specifier.
+ // FIXME: Allocate via ASTContext?
+ return new CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == RecordDecl::TK_class,
+ Access, BaseType);
+}
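+
+// Examples of base specifiers rejected above:
+//   union U { int i; };
+//   struct Incomplete;
+//   struct D1 : U {};            // error: a union cannot be a base class
+//   struct D2 : Incomplete {};   // error: incomplete base class
+//   struct D3 : int {};          // error: base type must be a class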
+
+/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
+/// one entry in the base class list of a class specifier, for
+/// example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+Sema::BaseResult
+Sema::ActOnBaseSpecifier(DeclPtrTy classdecl, SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeTy *basetype, SourceLocation BaseLoc) {
+ AdjustDeclIfTemplate(classdecl);
+ CXXRecordDecl *Class = cast<CXXRecordDecl>(classdecl.getAs<Decl>());
+ QualType BaseType = QualType::getFromOpaquePtr(basetype);
+ if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
+ Virtual, Access,
+ BaseType, BaseLoc))
+ return BaseSpec;
+
+ return true;
+}
+
+/// \brief Performs the actual work of attaching the given base class
+/// specifiers to a C++ class.
+bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
+ unsigned NumBases) {
+ if (NumBases == 0)
+ return false;
+
+ // Used to keep track of which base types we have already seen, so
+ // that we can properly diagnose redundant direct base types. Note
+ // that the key is always the unqualified canonical type of the base
+ // class.
+ std::map<QualType, CXXBaseSpecifier*, QualTypeOrdering> KnownBaseTypes;
+
+ // Copy non-redundant base specifiers into permanent storage.
+ unsigned NumGoodBases = 0;
+ bool Invalid = false;
+ for (unsigned idx = 0; idx < NumBases; ++idx) {
+ QualType NewBaseType
+ = Context.getCanonicalType(Bases[idx]->getType());
+ NewBaseType = NewBaseType.getUnqualifiedType();
+
+ if (KnownBaseTypes[NewBaseType]) {
+ // C++ [class.mi]p3:
+ // A class shall not be specified as a direct base class of a
+ // derived class more than once.
+ Diag(Bases[idx]->getSourceRange().getBegin(),
+ diag::err_duplicate_base_class)
+ << KnownBaseTypes[NewBaseType]->getType()
+ << Bases[idx]->getSourceRange();
+
+ // Delete the duplicate base class specifier; we're going to
+ // overwrite its pointer later.
+ delete Bases[idx];
+
+ Invalid = true;
+ } else {
+ // Okay, add this new base class.
+ KnownBaseTypes[NewBaseType] = Bases[idx];
+ Bases[NumGoodBases++] = Bases[idx];
+ }
+ }
+
+ // Attach the remaining base class specifiers to the derived class.
+ Class->setBases(Bases, NumGoodBases);
+
+ // Delete the remaining (good) base class specifiers, since their
+ // data has been copied into the CXXRecordDecl.
+ for (unsigned idx = 0; idx < NumGoodBases; ++idx)
+ delete Bases[idx];
+
+ return Invalid;
+}
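+
+// Example of the duplicate-base check above:
+//   struct A {};
+//   struct B : A, A {};          // error: 'A' is a direct base class twice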
+
+/// ActOnBaseSpecifiers - Attach the given base specifiers to the
+/// class, after checking whether there are any duplicate base
+/// classes.
+void Sema::ActOnBaseSpecifiers(DeclPtrTy ClassDecl, BaseTy **Bases,
+ unsigned NumBases) {
+ if (!ClassDecl || !Bases || !NumBases)
+ return;
+
+ AdjustDeclIfTemplate(ClassDecl);
+ AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl.getAs<Decl>()),
+ (CXXBaseSpecifier**)(Bases), NumBases);
+}
+
+//===----------------------------------------------------------------------===//
+// C++ class member Handling
+//===----------------------------------------------------------------------===//
+
+/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
+/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
+/// bitfield width if there is one and 'InitExpr' specifies the initializer if
+/// any.
+Sema::DeclPtrTy
+Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
+ ExprTy *BW, ExprTy *InitExpr, bool Deleted) {
+ const DeclSpec &DS = D.getDeclSpec();
+ DeclarationName Name = GetNameForDeclarator(D);
+ Expr *BitWidth = static_cast<Expr*>(BW);
+ Expr *Init = static_cast<Expr*>(InitExpr);
+ SourceLocation Loc = D.getIdentifierLoc();
+
+ bool isFunc = D.isFunctionDeclarator();
+
+ // C++ 9.2p6: A member shall not be declared to have automatic storage
+ // duration (auto, register) or with the extern storage-class-specifier.
+ // C++ 7.1.1p8: The mutable specifier can be applied only to names of class
+ // data members and cannot be applied to names declared const or static,
+ // and cannot be applied to reference members.
+ switch (DS.getStorageClassSpec()) {
+ case DeclSpec::SCS_unspecified:
+ case DeclSpec::SCS_typedef:
+ case DeclSpec::SCS_static:
+ // FALL THROUGH.
+ break;
+ case DeclSpec::SCS_mutable:
+ if (isFunc) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_function);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_mutable_function);
+
+ // FIXME: It would be nicer if the keyword was ignored only for this
+ // declarator. Otherwise we could get follow-up errors.
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ } else {
+ QualType T = GetTypeForDeclarator(D, S);
+ diag::kind err = static_cast<diag::kind>(0);
+ if (T->isReferenceType())
+ err = diag::err_mutable_reference;
+ else if (T.isConstQualified())
+ err = diag::err_mutable_const;
+ if (err != 0) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(), err);
+ else
+ Diag(DS.getThreadSpecLoc(), err);
+ // FIXME: It would be nicer if the keyword was ignored only for this
+ // declarator. Otherwise we could get follow-up errors.
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+ }
+ break;
+ default:
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_storageclass_invalid_for_member);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_storageclass_invalid_for_member);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ if (!isFunc &&
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename &&
+ D.getNumTypeObjects() == 0) {
+ // Check also for this case:
+ //
+ // typedef int f();
+ // f a;
+ //
+ QualType TDType = QualType::getFromOpaquePtr(DS.getTypeRep());
+ isFunc = TDType->isFunctionType();
+ }
+
+ bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
+ DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
+ !isFunc);
+
+ Decl *Member;
+ if (isInstField) {
+ Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth,
+ AS);
+ assert(Member && "HandleField never returns null");
+ } else {
+ Member = ActOnDeclarator(S, D).getAs<Decl>();
+ if (!Member) {
+ if (BitWidth) DeleteExpr(BitWidth);
+ return DeclPtrTy();
+ }
+
+ // Non-instance-fields can't have a bitfield.
+ if (BitWidth) {
+ if (Member->isInvalidDecl()) {
+ // don't emit another diagnostic.
+ } else if (isa<VarDecl>(Member)) {
+ // C++ 9.6p3: A bit-field shall not be a static member.
+ // "static member 'A' cannot be a bit-field"
+ Diag(Loc, diag::err_static_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else if (isa<TypedefDecl>(Member)) {
+ // "typedef member 'x' cannot be a bit-field"
+ Diag(Loc, diag::err_typedef_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else {
+ // A function typedef ("typedef int f(); f a;").
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
+ Diag(Loc, diag::err_not_integral_type_bitfield)
+ << Name << cast<ValueDecl>(Member)->getType()
+ << BitWidth->getSourceRange();
+ }
+
+ DeleteExpr(BitWidth);
+ BitWidth = 0;
+ Member->setInvalidDecl();
+ }
+
+ Member->setAccess(AS);
+ }
+
+ assert((Name || isInstField) && "No identifier for non-field ?");
+
+ if (Init)
+ AddInitializerToDecl(DeclPtrTy::make(Member), ExprArg(*this, Init), false);
+ if (Deleted) // FIXME: Source location is not very good.
+ SetDeclDeleted(DeclPtrTy::make(Member), D.getSourceRange().getBegin());
+
+ if (isInstField) {
+ FieldCollector->Add(cast<FieldDecl>(Member));
+ return DeclPtrTy();
+ }
+ return DeclPtrTy::make(Member);
+}
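+
+// Examples of member declarators diagnosed above:
+//   struct S {
+//     extern int e;              // error: invalid storage class for a member
+//     mutable const int c;       // error: 'mutable' on a const member
+//     static int b : 4;          // error: a static member cannot be a bit-field
+//   };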
+
+/// ActOnMemInitializer - Handle a C++ member initializer.
+Sema::MemInitResult
+Sema::ActOnMemInitializer(DeclPtrTy ConstructorD,
+ Scope *S,
+ IdentifierInfo *MemberOrBase,
+ SourceLocation IdLoc,
+ SourceLocation LParenLoc,
+ ExprTy **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ConstructorD.getAs<Decl>());
+ if (!Constructor) {
+ // The user wrote a constructor initializer on a function that is
+ // not a C++ constructor. Ignore the error for now, because we may
+ // have more member initializers coming; we'll diagnose it just
+ // once in ActOnMemInitializers.
+ return true;
+ }
+
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ // C++ [class.base.init]p2:
+ // Names in a mem-initializer-id are looked up in the scope of the
+ // constructor’s class and, if not found in that scope, are looked
+ // up in the scope containing the constructor’s
+ // definition. [Note: if the constructor’s class contains a member
+ // with the same name as a direct or virtual base class of the
+ // class, a mem-initializer-id naming the member or base class and
+ // composed of a single identifier refers to the class member. A
+ // mem-initializer-id for the hidden base class may be specified
+ // using a qualified name. ]
+ // Look for a member, first.
+ FieldDecl *Member = 0;
+ DeclContext::lookup_result Result
+ = ClassDecl->lookup(Context, MemberOrBase);
+ if (Result.first != Result.second)
+ Member = dyn_cast<FieldDecl>(*Result.first);
+
+ // FIXME: Handle members of an anonymous union.
+
+ if (Member) {
+ // FIXME: Perform direct initialization of the member.
+ return new CXXBaseOrMemberInitializer(Member, (Expr **)Args, NumArgs);
+ }
+
+ // It didn't name a member, so see if it names a class.
+ TypeTy *BaseTy = getTypeName(*MemberOrBase, IdLoc, S, 0/*SS*/);
+ if (!BaseTy)
+ return Diag(IdLoc, diag::err_mem_init_not_member_or_class)
+ << MemberOrBase << SourceRange(IdLoc, RParenLoc);
+
+ QualType BaseType = QualType::getFromOpaquePtr(BaseTy);
+ if (!BaseType->isRecordType())
+ return Diag(IdLoc, diag::err_base_init_does_not_name_class)
+ << BaseType << SourceRange(IdLoc, RParenLoc);
+
+ // C++ [class.base.init]p2:
+ // [...] Unless the mem-initializer-id names a nonstatic data
+ // member of the constructor’s class or a direct or virtual base
+ // of that class, the mem-initializer is ill-formed. A
+ // mem-initializer-list can initialize a base class using any
+ // name that denotes that base class type.
+
+ // First, check for a direct base class.
+ const CXXBaseSpecifier *DirectBaseSpec = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+ if (Context.getCanonicalType(BaseType).getUnqualifiedType() ==
+ Context.getCanonicalType(Base->getType()).getUnqualifiedType()) {
+ // We found a direct base of this type. That's what we're
+ // initializing.
+ DirectBaseSpec = &*Base;
+ break;
+ }
+ }
+
+ // Check for a virtual base class.
+ // FIXME: We might be able to short-circuit this if we know in advance that
+ // there are no virtual bases.
+ const CXXBaseSpecifier *VirtualBaseSpec = 0;
+ if (!DirectBaseSpec || !DirectBaseSpec->isVirtual()) {
+ // We haven't found a base yet; search the class hierarchy for a
+ // virtual base class.
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (IsDerivedFrom(Context.getTypeDeclType(ClassDecl), BaseType, Paths)) {
+ for (BasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (Path->back().Base->isVirtual()) {
+ VirtualBaseSpec = Path->back().Base;
+ break;
+ }
+ }
+ }
+ }
+
+ // C++ [base.class.init]p2:
+ // If a mem-initializer-id is ambiguous because it designates both
+ // a direct non-virtual base class and an inherited virtual base
+ // class, the mem-initializer is ill-formed.
+ if (DirectBaseSpec && VirtualBaseSpec)
+ return Diag(IdLoc, diag::err_base_init_direct_and_virtual)
+ << MemberOrBase << SourceRange(IdLoc, RParenLoc);
+
+ return new CXXBaseOrMemberInitializer(BaseType, (Expr **)Args, NumArgs);
+}
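+
+// Example: a mem-initializer-id may name either a nonstatic data member
+// or a (direct or virtual) base class, which is what the lookup above
+// resolves:
+//   struct Base { Base(int); };
+//   struct Derived : Base {
+//     int n;
+//     Derived() : Base(1), n(2) {}   // base initializer, then member initializer
+//   };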
+
+void Sema::ActOnMemInitializers(DeclPtrTy ConstructorDecl,
+ SourceLocation ColonLoc,
+ MemInitTy **MemInits, unsigned NumMemInits) {
+ CXXConstructorDecl *Constructor =
+ dyn_cast<CXXConstructorDecl>(ConstructorDecl.getAs<Decl>());
+
+ if (!Constructor) {
+ Diag(ColonLoc, diag::err_only_constructors_take_base_inits);
+ return;
+ }
+}
+
+namespace {
+ /// PureVirtualMethodCollector - traverses a class and its superclasses
+ /// and determines if it has any pure virtual methods.
+ class VISIBILITY_HIDDEN PureVirtualMethodCollector {
+ ASTContext &Context;
+
+ public:
+ typedef llvm::SmallVector<const CXXMethodDecl*, 8> MethodList;
+
+ private:
+ MethodList Methods;
+
+ void Collect(const CXXRecordDecl* RD, MethodList& Methods);
+
+ public:
+ PureVirtualMethodCollector(ASTContext &Ctx, const CXXRecordDecl* RD)
+ : Context(Ctx) {
+
+ MethodList List;
+ Collect(RD, List);
+
+ // Copy the temporary list to methods, and make sure to ignore any
+ // null entries.
+ for (size_t i = 0, e = List.size(); i != e; ++i) {
+ if (List[i])
+ Methods.push_back(List[i]);
+ }
+ }
+
+ bool empty() const { return Methods.empty(); }
+
+ MethodList::const_iterator methods_begin() { return Methods.begin(); }
+ MethodList::const_iterator methods_end() { return Methods.end(); }
+ };
+
+ void PureVirtualMethodCollector::Collect(const CXXRecordDecl* RD,
+ MethodList& Methods) {
+ // First, collect the pure virtual methods for the base classes.
+ for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
+ BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
+ if (const RecordType *RT = Base->getType()->getAsRecordType()) {
+ const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (BaseDecl && BaseDecl->isAbstract())
+ Collect(BaseDecl, Methods);
+ }
+ }
+
+ // Next, zero out any pure virtual methods that this class overrides.
+ typedef llvm::SmallPtrSet<const CXXMethodDecl*, 4> MethodSetTy;
+
+ MethodSetTy OverriddenMethods;
+ size_t MethodsSize = Methods.size();
+
+ for (RecordDecl::decl_iterator i = RD->decls_begin(Context),
+ e = RD->decls_end(Context);
+ i != e; ++i) {
+ // Traverse the record, looking for methods.
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*i)) {
+      // If the method is pure virtual, add it to the methods vector.
+ if (MD->isPure()) {
+ Methods.push_back(MD);
+ continue;
+ }
+
+ // Otherwise, record all the overridden methods in our set.
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ // Keep track of the overridden methods.
+ OverriddenMethods.insert(*I);
+ }
+ }
+ }
+
+ // Now go through the methods and zero out all the ones we know are
+ // overridden.
+ for (size_t i = 0, e = MethodsSize; i != e; ++i) {
+ if (OverriddenMethods.count(Methods[i]))
+ Methods[i] = 0;
+ }
+
+ }
+}
+
+bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
+ unsigned DiagID, AbstractDiagSelID SelID,
+ const CXXRecordDecl *CurrentRD) {
+
+ if (!getLangOptions().CPlusPlus)
+ return false;
+
+ if (const ArrayType *AT = Context.getAsArrayType(T))
+ return RequireNonAbstractType(Loc, AT->getElementType(), DiagID, SelID,
+ CurrentRD);
+
+ if (const PointerType *PT = T->getAsPointerType()) {
+ // Find the innermost pointer type.
+ while (const PointerType *T = PT->getPointeeType()->getAsPointerType())
+ PT = T;
+
+ if (const ArrayType *AT = Context.getAsArrayType(PT->getPointeeType()))
+ return RequireNonAbstractType(Loc, AT->getElementType(), DiagID, SelID,
+ CurrentRD);
+ }
+
+ const RecordType *RT = T->getAsRecordType();
+ if (!RT)
+ return false;
+
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ if (CurrentRD && CurrentRD != RD)
+ return false;
+
+ if (!RD->isAbstract())
+ return false;
+
+ Diag(Loc, DiagID) << RD->getDeclName() << SelID;
+
+ // Check if we've already emitted the list of pure virtual functions for this
+ // class.
+ if (PureVirtualClassDiagSet && PureVirtualClassDiagSet->count(RD))
+ return true;
+
+ PureVirtualMethodCollector Collector(Context, RD);
+
+ for (PureVirtualMethodCollector::MethodList::const_iterator I =
+ Collector.methods_begin(), E = Collector.methods_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ Diag(MD->getLocation(), diag::note_pure_virtual_function) <<
+ MD->getDeclName();
+ }
+
+ if (!PureVirtualClassDiagSet)
+ PureVirtualClassDiagSet.reset(new RecordDeclSetTy);
+ PureVirtualClassDiagSet->insert(RD);
+
+ return true;
+}
+
+namespace {
+ class VISIBILITY_HIDDEN AbstractClassUsageDiagnoser
+ : public DeclVisitor<AbstractClassUsageDiagnoser, bool> {
+ Sema &SemaRef;
+ CXXRecordDecl *AbstractClass;
+
+ bool VisitDeclContext(const DeclContext *DC) {
+ bool Invalid = false;
+
+ for (CXXRecordDecl::decl_iterator I = DC->decls_begin(SemaRef.Context),
+ E = DC->decls_end(SemaRef.Context); I != E; ++I)
+ Invalid |= Visit(*I);
+
+ return Invalid;
+ }
+
+ public:
+ AbstractClassUsageDiagnoser(Sema& SemaRef, CXXRecordDecl *ac)
+ : SemaRef(SemaRef), AbstractClass(ac) {
+ Visit(SemaRef.Context.getTranslationUnitDecl());
+ }
+
+ bool VisitFunctionDecl(const FunctionDecl *FD) {
+ if (FD->isThisDeclarationADefinition()) {
+      // No need to do the check if we're in a definition, because it requires
+      // that the return/param types are complete.
+ return VisitDeclContext(FD);
+ }
+
+ // Check the return type.
+ QualType RTy = FD->getType()->getAsFunctionType()->getResultType();
+ bool Invalid =
+ SemaRef.RequireNonAbstractType(FD->getLocation(), RTy,
+ diag::err_abstract_type_in_decl,
+ Sema::AbstractReturnType,
+ AbstractClass);
+
+ for (FunctionDecl::param_const_iterator I = FD->param_begin(),
+ E = FD->param_end(); I != E; ++I) {
+ const ParmVarDecl *VD = *I;
+ Invalid |=
+ SemaRef.RequireNonAbstractType(VD->getLocation(),
+ VD->getOriginalType(),
+ diag::err_abstract_type_in_decl,
+ Sema::AbstractParamType,
+ AbstractClass);
+ }
+
+ return Invalid;
+ }
+
+ bool VisitDecl(const Decl* D) {
+ if (const DeclContext *DC = dyn_cast<DeclContext>(D))
+ return VisitDeclContext(DC);
+
+ return false;
+ }
+ };
+}
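+
+// Examples of declarations the diagnoser above flags:
+//   struct Shape { virtual void draw() = 0; };   // abstract class
+//   Shape make();                 // error: abstract return type
+//   void paint(Shape s);          // error: abstract parameter type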
+
+void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
+ DeclPtrTy TagDecl,
+ SourceLocation LBrac,
+ SourceLocation RBrac) {
+ AdjustDeclIfTemplate(TagDecl);
+ ActOnFields(S, RLoc, TagDecl,
+ (DeclPtrTy*)FieldCollector->getCurFields(),
+ FieldCollector->getCurNumFields(), LBrac, RBrac, 0);
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(TagDecl.getAs<Decl>());
+ if (!RD->isAbstract()) {
+ // Collect all the pure virtual methods and see if this is an abstract
+ // class after all.
+ PureVirtualMethodCollector Collector(Context, RD);
+ if (!Collector.empty())
+ RD->setAbstract(true);
+ }
+
+ if (RD->isAbstract())
+ AbstractClassUsageDiagnoser(*this, RD);
+
+ if (RD->hasTrivialConstructor() || RD->hasTrivialDestructor()) {
+ for (RecordDecl::field_iterator i = RD->field_begin(Context),
+ e = RD->field_end(Context); i != e; ++i) {
+ // All the nonstatic data members must have trivial constructors.
+ QualType FTy = i->getType();
+ while (const ArrayType *AT = Context.getAsArrayType(FTy))
+ FTy = AT->getElementType();
+
+ if (const RecordType *RT = FTy->getAsRecordType()) {
+ CXXRecordDecl *FieldRD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (!FieldRD->hasTrivialConstructor())
+ RD->setHasTrivialConstructor(false);
+ if (!FieldRD->hasTrivialDestructor())
+ RD->setHasTrivialDestructor(false);
+
+ // If RD has neither a trivial constructor nor a trivial destructor
+ // we don't need to continue checking.
+ if (!RD->hasTrivialConstructor() && !RD->hasTrivialDestructor())
+ break;
+ }
+ }
+ }
+
+ if (!RD->isDependentType())
+ AddImplicitlyDeclaredMembersToClass(RD);
+}
+
+/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
+/// special functions, such as the default constructor, copy
+/// constructor, or destructor, to the given C++ class (C++
+/// [special]p1). This routine can only be executed just before the
+/// definition of the class is complete.
+void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ ClassType = Context.getCanonicalType(ClassType);
+
+ // FIXME: Implicit declarations have exception specifications, which are
+ // the union of the specifications of the implicitly called functions.
+
+ if (!ClassDecl->hasUserDeclaredConstructor()) {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class X
+ // that can be called without an argument. If there is no
+ // user-declared constructor for class X, a default constructor is
+ // implicitly declared. An implicitly-declared default constructor
+ // is an inline public member of its class.
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(ClassType);
+ CXXConstructorDecl *DefaultCon =
+ CXXConstructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name,
+ Context.getFunctionType(Context.VoidTy,
+ 0, 0, false, 0),
+ /*isExplicit=*/false,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ DefaultCon->setAccess(AS_public);
+ DefaultCon->setImplicit();
+ ClassDecl->addDecl(Context, DefaultCon);
+
+ // Notify the class that we've added a constructor.
+ ClassDecl->addedConstructor(Context, DefaultCon);
+ }
+
+ if (!ClassDecl->hasUserDeclaredCopyConstructor()) {
+ // C++ [class.copy]p4:
+ // If the class definition does not explicitly declare a copy
+ // constructor, one is declared implicitly.
+
+ // C++ [class.copy]p5:
+ // The implicitly-declared copy constructor for a class X will
+ // have the form
+ //
+ // X::X(const X&)
+ //
+ // if
+ bool HasConstCopyConstructor = true;
+
+ // -- each direct or virtual base class B of X has a copy
+ // constructor whose first parameter is of type const B& or
+ // const volatile B&, and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin();
+ HasConstCopyConstructor && Base != ClassDecl->bases_end(); ++Base) {
+ const CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAsRecordType()->getDecl());
+ HasConstCopyConstructor
+ = BaseClassDecl->hasConstCopyConstructor(Context);
+ }
+
+ // -- for all the nonstatic data members of X that are of a
+ // class type M (or array thereof), each such class type
+ // has a copy constructor whose first parameter is of type
+ // const M& or const volatile M&.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(Context);
+ HasConstCopyConstructor && Field != ClassDecl->field_end(Context);
+ ++Field) {
+ QualType FieldType = (*Field)->getType();
+ if (const ArrayType *Array = Context.getAsArrayType(FieldType))
+ FieldType = Array->getElementType();
+ if (const RecordType *FieldClassType = FieldType->getAsRecordType()) {
+ const CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ HasConstCopyConstructor
+ = FieldClassDecl->hasConstCopyConstructor(Context);
+ }
+ }
+
+ // Otherwise, the implicitly declared copy constructor will have
+ // the form
+ //
+ // X::X(X&)
+ QualType ArgType = ClassType;
+ if (HasConstCopyConstructor)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ // An implicitly-declared copy constructor is an inline public
+ // member of its class.
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(ClassType);
+ CXXConstructorDecl *CopyConstructor
+ = CXXConstructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name,
+ Context.getFunctionType(Context.VoidTy,
+ &ArgType, 1,
+ false, 0),
+ /*isExplicit=*/false,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ CopyConstructor->setAccess(AS_public);
+ CopyConstructor->setImplicit();
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
+ ClassDecl->getLocation(),
+ /*IdentifierInfo=*/0,
+ ArgType, VarDecl::None, 0);
+ CopyConstructor->setParams(Context, &FromParam, 1);
+
+ ClassDecl->addedConstructor(Context, CopyConstructor);
+ ClassDecl->addDecl(Context, CopyConstructor);
+ }
+
+ if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
+    // Note: The following rules are largely analogous to the copy
+ // constructor rules. Note that virtual bases are not taken into account
+ // for determining the argument type of the operator. Note also that
+ // operators taking an object instead of a reference are allowed.
+ //
+ // C++ [class.copy]p10:
+ // If the class definition does not explicitly declare a copy
+ // assignment operator, one is declared implicitly.
+ // The implicitly-defined copy assignment operator for a class X
+ // will have the form
+ //
+ // X& X::operator=(const X&)
+ //
+ // if
+ bool HasConstCopyAssignment = true;
+
+ // -- each direct base class B of X has a copy assignment operator
+ // whose parameter is of type const B&, const volatile B& or B,
+ // and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin();
+ HasConstCopyAssignment && Base != ClassDecl->bases_end(); ++Base) {
+ const CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAsRecordType()->getDecl());
+ HasConstCopyAssignment = BaseClassDecl->hasConstCopyAssignment(Context);
+ }
+
+ // -- for all the nonstatic data members of X that are of a class
+ // type M (or array thereof), each such class type has a copy
+ // assignment operator whose parameter is of type const M&,
+ // const volatile M& or M.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(Context);
+ HasConstCopyAssignment && Field != ClassDecl->field_end(Context);
+ ++Field) {
+ QualType FieldType = (*Field)->getType();
+ if (const ArrayType *Array = Context.getAsArrayType(FieldType))
+ FieldType = Array->getElementType();
+ if (const RecordType *FieldClassType = FieldType->getAsRecordType()) {
+ const CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ HasConstCopyAssignment
+ = FieldClassDecl->hasConstCopyAssignment(Context);
+ }
+ }
+
+ // Otherwise, the implicitly declared copy assignment operator will
+ // have the form
+ //
+ // X& X::operator=(X&)
+ QualType ArgType = ClassType;
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ if (HasConstCopyAssignment)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ // An implicitly-declared copy assignment operator is an inline public
+ // member of its class.
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ CXXMethodDecl *CopyAssignment =
+ CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name,
+ Context.getFunctionType(RetType, &ArgType, 1,
+ false, 0),
+ /*isStatic=*/false, /*isInline=*/true);
+ CopyAssignment->setAccess(AS_public);
+ CopyAssignment->setImplicit();
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
+ ClassDecl->getLocation(),
+ /*IdentifierInfo=*/0,
+ ArgType, VarDecl::None, 0);
+ CopyAssignment->setParams(Context, &FromParam, 1);
+
+ // Don't call addedAssignmentOperator. There is no way to distinguish an
+ // implicit from an explicit assignment operator.
+ ClassDecl->addDecl(Context, CopyAssignment);
+ }
+
+ if (!ClassDecl->hasUserDeclaredDestructor()) {
+ // C++ [class.dtor]p2:
+ // If a class has no user-declared destructor, a destructor is
+ // declared implicitly. An implicitly-declared destructor is an
+ // inline public member of its class.
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(ClassType);
+ CXXDestructorDecl *Destructor
+ = CXXDestructorDecl::Create(Context, ClassDecl,
+ ClassDecl->getLocation(), Name,
+ Context.getFunctionType(Context.VoidTy,
+ 0, 0, false, 0),
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ Destructor->setAccess(AS_public);
+ Destructor->setImplicit();
+ ClassDecl->addDecl(Context, Destructor);
+ }
+}
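+
+// Example: for a class with no user-declared special members, such as
+//   struct Point { int x, y; };
+// the code above implicitly declares, as inline public members,
+//   Point(); Point(const Point &); Point &operator=(const Point &); ~Point();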
+
+void Sema::ActOnReenterTemplateScope(Scope *S, DeclPtrTy TemplateD) {
+ TemplateDecl *Template = TemplateD.getAs<TemplateDecl>();
+ if (!Template)
+ return;
+
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ NamedDecl *Named = cast<NamedDecl>(*Param);
+ if (Named->getDeclName()) {
+ S->AddDecl(DeclPtrTy::make(Named));
+ IdResolver.AddDecl(Named);
+ }
+ }
+}
+
+/// ActOnStartDelayedCXXMethodDeclaration - We have completed
+/// parsing a top-level (non-nested) C++ class, and we are now
+/// parsing those parts of the given Method declaration that could
+/// not be parsed earlier (C++ [class.mem]p2), such as default
+/// arguments. This action should enter the scope of the given
+/// Method declaration as if we had just parsed the qualified method
+/// name. However, it should not bring the parameters into scope;
+/// that will be performed by ActOnDelayedCXXMethodParameter.
+void Sema::ActOnStartDelayedCXXMethodDeclaration(Scope *S, DeclPtrTy MethodD) {
+ CXXScopeSpec SS;
+ FunctionDecl *Method = cast<FunctionDecl>(MethodD.getAs<Decl>());
+ QualType ClassTy
+ = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
+ SS.setScopeRep(
+ NestedNameSpecifier::Create(Context, 0, false, ClassTy.getTypePtr()));
+ ActOnCXXEnterDeclaratorScope(S, SS);
+}
+
+/// ActOnDelayedCXXMethodParameter - We've already started a delayed
+/// C++ method declaration. We're (re-)introducing the given
+/// function parameter into scope for use in parsing later parts of
+/// the method declaration. For example, we could see an
+/// ActOnParamDefaultArgument event for this parameter.
+void Sema::ActOnDelayedCXXMethodParameter(Scope *S, DeclPtrTy ParamD) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(ParamD.getAs<Decl>());
+
+ // If this parameter has an unparsed default argument, clear it out
+ // to make way for the parsed default argument.
+ if (Param->hasUnparsedDefaultArg())
+ Param->setDefaultArg(0);
+
+ S->AddDecl(DeclPtrTy::make(Param));
+ if (Param->getDeclName())
+ IdResolver.AddDecl(Param);
+}
+
+/// ActOnFinishDelayedCXXMethodDeclaration - We have finished
+/// processing the delayed method declaration for Method. The method
+/// declaration is now considered finished. There may be a separate
+/// ActOnStartOfFunctionDef action later (not necessarily
+/// immediately!) for this method, if it was also defined inside the
+/// class body.
+void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, DeclPtrTy MethodD) {
+ FunctionDecl *Method = cast<FunctionDecl>(MethodD.getAs<Decl>());
+ CXXScopeSpec SS;
+ QualType ClassTy
+ = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
+ SS.setScopeRep(
+ NestedNameSpecifier::Create(Context, 0, false, ClassTy.getTypePtr()));
+ ActOnCXXExitDeclaratorScope(S, SS);
+
+ // Now that we have our default arguments, check the constructor
+ // again. It could produce additional diagnostics or affect whether
+ // the class has implicitly-declared destructors, among other
+ // things.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Method))
+ CheckConstructor(Constructor);
+
+ // Check the default arguments, which we may have added.
+ if (!Method->isInvalidDecl())
+ CheckCXXDefaultArguments(Method);
+}
+
+/// CheckConstructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the constructor declarator @p D with type @p
+/// R. If there are any errors in the declarator, this routine will
+/// emit diagnostics and set the invalid bit to true. In any case, the type
+/// will be updated to reflect a well-formed type for the constructor and
+/// returned.
+QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
+ FunctionDecl::StorageClass &SC) {
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+
+ // C++ [class.ctor]p3:
+ // A constructor shall not be virtual (10.3) or static (9.4). A
+ // constructor can be invoked for a const, volatile or const
+ // volatile object. A constructor shall not be declared const,
+ // volatile, or const volatile (9.3.2).
+ if (isVirtual) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "virtual" << SourceRange(D.getDeclSpec().getVirtualSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+ if (SC == FunctionDecl::Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ SC = FunctionDecl::None;
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ if (FTI.TypeQuals != 0) {
+ if (FTI.TypeQuals & QualType::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & QualType::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & QualType::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ }
+
+ // Rebuild the function type "R" without any type qualifiers (in
+ // case any of the errors above fired) and with "void" as the
+ // return type, since constructors don't have return types. We
+ // *always* have to do this, because GetTypeForDeclarator will
+ // put in a result type of "int" when none was specified.
+ const FunctionProtoType *Proto = R->getAsFunctionProtoType();
+ return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ Proto->isVariadic(), 0);
+}
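+
+// Examples of constructor declarators rejected above:
+//   struct T {
+//     virtual T();       // error: constructor cannot be virtual
+//     static T(int);     // error: constructor cannot be static
+//     T(char) const;     // error: constructor cannot be const-qualified
+//   };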
+
+/// CheckConstructor - Checks a fully-formed constructor for
+/// well-formedness, issuing any diagnostics required, and marking the
+/// constructor invalid if errors are found.
+void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
+ CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(Constructor->getDeclContext());
+ if (!ClassDecl)
+ return Constructor->setInvalidDecl();
+
+ // C++ [class.copy]p3:
+ // A declaration of a constructor for a class X is ill-formed if
+ // its first parameter is of type (optionally cv-qualified) X and
+ // either there are no other parameters or else all other
+ // parameters have default arguments.
+ if (!Constructor->isInvalidDecl() &&
+ ((Constructor->getNumParams() == 1) ||
+ (Constructor->getNumParams() > 1 &&
+ Constructor->getParamDecl(1)->getDefaultArg() != 0))) {
+ QualType ParamType = Constructor->getParamDecl(0)->getType();
+ QualType ClassTy = Context.getTagDeclType(ClassDecl);
+ if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
+ SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
+ Diag(ParamLoc, diag::err_constructor_byvalue_arg)
+ << CodeModificationHint::CreateInsertion(ParamLoc, " const &");
+ Constructor->setInvalidDecl();
+ }
+ }
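+
+  // For example, declarations like the following are diagnosed here; the
+  // suggested fix is to take the parameter by const reference instead:
+  //
+  //   struct X {
+  //     X(X x);             // ill-formed: copy constructor takes X by value
+  //     X(X x, int i = 0);  // ill-formed: the remaining parameter is defaulted
+  //   };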
+
+ // Notify the class that we've added a constructor.
+ ClassDecl->addedConstructor(Context, Constructor);
+}
+
+static inline bool
+FTIHasSingleVoidArgument(DeclaratorChunk::FunctionTypeInfo &FTI) {
+ return (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ FTI.ArgInfo[0].Param &&
+ FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType()->isVoidType());
+}
+
+/// CheckDestructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the destructor declarator @p D. If there are
+/// any errors in the declarator, this routine will emit diagnostics and
+/// mark the declarator invalid. Even if this happens, the type will be
+/// updated to reflect a well-formed type for the destructor and
+/// returned.
+QualType Sema::CheckDestructorDeclarator(Declarator &D,
+ FunctionDecl::StorageClass& SC) {
+ // C++ [class.dtor]p1:
+ // [...] A typedef-name that names a class is a class-name
+ // (7.1.3); however, a typedef-name that names a class shall not
+ // be used as the identifier in the declarator for a destructor
+ // declaration.
+ QualType DeclaratorType = QualType::getFromOpaquePtr(D.getDeclaratorIdType());
+ if (isa<TypedefType>(DeclaratorType)) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ << DeclaratorType;
+ D.setInvalidType();
+ }
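+
+  // For example, a destructor declared through a typedef-name is rejected:
+  //
+  //   struct X { ~X(); };
+  //   typedef X Y;
+  //   Y::~Y() { }   // error: 'Y' is a typedef-name, not the class-name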
+
+ // C++ [class.dtor]p2:
+ // A destructor is used to destroy objects of its class type. A
+ // destructor takes no parameters, and no return type can be
+ // specified for it (not even void). The address of a destructor
+ // shall not be taken. A destructor shall not be static. A
+ // destructor can be invoked for a const, volatile or const
+ // volatile object. A destructor shall not be declared const,
+ // volatile or const volatile (9.3.2).
+ if (SC == FunctionDecl::Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ SC = FunctionDecl::None;
+ D.setInvalidType();
+ }
+ if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ // Destructors don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float ~X();
+ // };
+ //
+ // The return type will be eliminated later.
+ Diag(D.getIdentifierLoc(), diag::err_destructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(0).Fun;
+ if (FTI.TypeQuals != 0 && !D.isInvalidType()) {
+ if (FTI.TypeQuals & QualType::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & QualType::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & QualType::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ // Make sure we don't have any parameters.
+ if (FTI.NumArgs > 0 && !FTIHasSingleVoidArgument(FTI)) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_with_params);
+
+ // Delete the parameters.
+ FTI.freeArgs();
+ D.setInvalidType();
+ }
+
+ // Make sure the destructor isn't variadic.
+ if (FTI.isVariadic) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_variadic);
+ D.setInvalidType();
+ }
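+
+  // For example:
+  //
+  //   struct A { ~A(int); };   // error: destructor cannot have parameters
+  //   struct B { ~B(...); };   // error: destructor cannot be variadic
+  //   struct C { ~C(void); };  // OK: '(void)' simply means no parameters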
+
+ // Rebuild the function type "R" without any type qualifiers or
+ // parameters (in case any of the errors above fired) and with
+ // "void" as the return type, since destructors don't have return
+ // types. We *always* have to do this, because GetTypeForDeclarator
+ // will put in a result type of "int" when none was specified.
+ return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0);
+}
+
+/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
+/// well-formedness of the conversion function declarator @p D with
+/// type @p R. If there are any errors in the declarator, this routine
+/// will emit diagnostics and mark the declarator invalid. Either way,
+/// the type @p R will be updated to reflect a well-formed type for the
+/// conversion operator.
+void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
+ FunctionDecl::StorageClass& SC) {
+ // C++ [class.conv.fct]p1:
+ // Neither parameter types nor return type can be specified. The
+ // type of a conversion function (8.3.5) is “function taking no
+ // parameter returning conversion-type-id.”
+ if (SC == FunctionDecl::Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_not_member)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ SC = FunctionDecl::None;
+ }
+ if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ // Conversion functions don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float operator bool();
+ // };
+ //
+ // The return type will be changed later anyway.
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ }
+
+ // Make sure we don't have any parameters.
+ if (R->getAsFunctionProtoType()->getNumArgs() > 0) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
+
+ // Delete the parameters.
+ D.getTypeObject(0).Fun.freeArgs();
+ D.setInvalidType();
+ }
+
+ // Make sure the conversion function isn't variadic.
+ if (R->getAsFunctionProtoType()->isVariadic() && !D.isInvalidType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
+ D.setInvalidType();
+ }
+
+ // C++ [class.conv.fct]p4:
+ // The conversion-type-id shall not represent a function type nor
+ // an array type.
+ QualType ConvType = QualType::getFromOpaquePtr(D.getDeclaratorIdType());
+ if (ConvType->isArrayType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_array);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ } else if (ConvType->isFunctionType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_function);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ }
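+
+  // These cases are typically reached through a typedef, since the
+  // conversion-type-id grammar cannot spell an array or function type directly:
+  //
+  //   typedef int Arr[10];
+  //   typedef int Fn();
+  //   struct X {
+  //     operator Arr();   // error: conversion to an array type
+  //     operator Fn();    // error: conversion to a function type
+  //   };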
+
+ // Rebuild the function type "R" without any parameters (in case any
+ // of the errors above fired) and with the conversion type as the
+ // return type.
+ R = Context.getFunctionType(ConvType, 0, 0, false,
+ R->getAsFunctionProtoType()->getTypeQuals());
+
+ // C++0x explicit conversion operators.
+ if (D.getDeclSpec().isExplicitSpecified() && !getLangOptions().CPlusPlus0x)
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::warn_explicit_conversion_functions)
+ << SourceRange(D.getDeclSpec().getExplicitSpecLoc());
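+
+  // For example, in C++98/03 mode a C++0x-style declaration such as
+  //
+  //   struct X {
+  //     explicit operator bool() const;
+  //   };
+  //
+  // is accepted but triggers the warning above.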
+}
+
+/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
+/// the declaration of the given C++ conversion function. This routine
+/// is responsible for recording the conversion function in the C++
+/// class, if possible.
+Sema::DeclPtrTy Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
+ assert(Conversion && "Expected to receive a conversion function declaration");
+
+ // Set the lexical context of this conversion function
+ Conversion->setLexicalDeclContext(CurContext);
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Conversion->getDeclContext());
+
+ // Make sure we aren't redeclaring the conversion function.
+ QualType ConvType = Context.getCanonicalType(Conversion->getConversionType());
+
+ // C++ [class.conv.fct]p1:
+ // [...] A conversion function is never used to convert a
+ // (possibly cv-qualified) object to the (possibly cv-qualified)
+ // same object type (or a reference to it), to a (possibly
+ // cv-qualified) base class of that type (or a reference to it),
+ // or to (possibly cv-qualified) void.
+ // FIXME: Suppress this warning if the conversion function ends up being a
+ // virtual function that overrides a virtual function in a base class.
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ if (const ReferenceType *ConvTypeRef = ConvType->getAsReferenceType())
+ ConvType = ConvTypeRef->getPointeeType();
+ if (ConvType->isRecordType()) {
+ ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
+ if (ConvType == ClassType)
+ Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used)
+ << ClassType;
+ else if (IsDerivedFrom(ClassType, ConvType))
+ Diag(Conversion->getLocation(), diag::warn_conv_to_base_not_used)
+ << ClassType << ConvType;
+ } else if (ConvType->isVoidType()) {
+ Diag(Conversion->getLocation(), diag::warn_conv_to_void_not_used)
+ << ClassType << ConvType;
+ }
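+
+  // For example, each of the following conversion functions is warned about,
+  // because it can never be used by an implicit conversion:
+  //
+  //   struct Base { };
+  //   struct Derived : Base {
+  //     operator Derived&();   // conversion to the class type itself
+  //     operator Base&();      // conversion to a base class
+  //     operator void();       // conversion to void
+  //   };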
+
+ if (Conversion->getPreviousDeclaration()) {
+ OverloadedFunctionDecl *Conversions = ClassDecl->getConversionFunctions();
+ for (OverloadedFunctionDecl::function_iterator
+ Conv = Conversions->function_begin(),
+ ConvEnd = Conversions->function_end();
+ Conv != ConvEnd; ++Conv) {
+ if (*Conv == Conversion->getPreviousDeclaration()) {
+ *Conv = Conversion;
+ return DeclPtrTy::make(Conversion);
+ }
+ }
+ assert(Conversion->isInvalidDecl() && "Conversion should not get here.");
+ } else
+ ClassDecl->addConversionFunction(Context, Conversion);
+
+ return DeclPtrTy::make(Conversion);
+}
+
+//===----------------------------------------------------------------------===//
+// Namespace Handling
+//===----------------------------------------------------------------------===//
+
+/// ActOnStartNamespaceDef - This is called at the start of a namespace
+/// definition.
+Sema::DeclPtrTy Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
+ SourceLocation IdentLoc,
+ IdentifierInfo *II,
+ SourceLocation LBrace) {
+ NamespaceDecl *Namespc =
+ NamespaceDecl::Create(Context, CurContext, IdentLoc, II);
+ Namespc->setLBracLoc(LBrace);
+
+ Scope *DeclRegionScope = NamespcScope->getParent();
+
+ if (II) {
+ // C++ [namespace.def]p2:
+ // The identifier in an original-namespace-definition shall not have been
+ // previously defined in the declarative region in which the
+ // original-namespace-definition appears. The identifier in an
+ // original-namespace-definition is the name of the namespace. Subsequently
+ // in that declarative region, it is treated as an original-namespace-name.
+
+ NamedDecl *PrevDecl = LookupName(DeclRegionScope, II, LookupOrdinaryName,
+ true);
+
+ if (NamespaceDecl *OrigNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl)) {
+ // This is an extended namespace definition.
+ // Attach this namespace decl to the chain of extended namespace
+ // definitions.
+ OrigNS->setNextNamespace(Namespc);
+ Namespc->setOriginalNamespace(OrigNS->getOriginalNamespace());
+
+ // Remove the previous declaration from the scope.
+ if (DeclRegionScope->isDeclScope(DeclPtrTy::make(OrigNS))) {
+ IdResolver.RemoveDecl(OrigNS);
+ DeclRegionScope->RemoveDecl(DeclPtrTy::make(OrigNS));
+ }
+ } else if (PrevDecl) {
+ // This is an invalid name redefinition.
+ Diag(Namespc->getLocation(), diag::err_redefinition_different_kind)
+ << Namespc->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ Namespc->setInvalidDecl();
+ // Continue on to push Namespc as current DeclContext and return it.
+ }
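+
+    // For example:
+    //
+    //   namespace M { int x; }
+    //   namespace M { int y; }   // OK: extends the original namespace definition
+    //
+    //   int N;
+    //   namespace N { }          // error: redefinition as a different kind of symbol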
+
+ PushOnScopeChains(Namespc, DeclRegionScope);
+ } else {
+ // FIXME: Handle anonymous namespaces
+ }
+
+ // Although we could have an invalid decl (i.e. the namespace name is a
+ // redefinition), push it as current DeclContext and try to continue parsing.
+  // FIXME: We should be able to push Namespc here, so that each DeclContext
+  // for the namespace has the declarations that showed up in that particular
+  // namespace definition.
+ PushDeclContext(NamespcScope, Namespc);
+ return DeclPtrTy::make(Namespc);
+}
+
+/// ActOnFinishNamespaceDef - This callback is called after a namespace is
+/// exited. D is the DeclPtrTy returned by ActOnStartNamespaceDef.
+void Sema::ActOnFinishNamespaceDef(DeclPtrTy D, SourceLocation RBrace) {
+ Decl *Dcl = D.getAs<Decl>();
+ NamespaceDecl *Namespc = dyn_cast_or_null<NamespaceDecl>(Dcl);
+ assert(Namespc && "Invalid parameter, expected NamespaceDecl");
+ Namespc->setRBracLoc(RBrace);
+ PopDeclContext();
+}
+
+Sema::DeclPtrTy Sema::ActOnUsingDirective(Scope *S,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList) {
+ assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
+ assert(NamespcName && "Invalid NamespcName.");
+  assert(IdentLoc.isValid() && "Invalid NamespcName location.");
+ assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+
+ UsingDirectiveDecl *UDir = 0;
+
+ // Lookup namespace name.
+ LookupResult R = LookupParsedName(S, &SS, NamespcName,
+ LookupNamespaceName, false);
+ if (R.isAmbiguous()) {
+ DiagnoseAmbiguousLookup(R, NamespcName, IdentLoc);
+ return DeclPtrTy();
+ }
+ if (NamedDecl *NS = R) {
+ assert(isa<NamespaceDecl>(NS) && "expected namespace decl");
+ // C++ [namespace.udir]p1:
+ // A using-directive specifies that the names in the nominated
+ // namespace can be used in the scope in which the
+ // using-directive appears after the using-directive. During
+ // unqualified name lookup (3.4.1), the names appear as if they
+ // were declared in the nearest enclosing namespace which
+ // contains both the using-directive and the nominated
+ // namespace. [Note: in this context, “contains” means “contains
+ // directly or indirectly”. ]
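+
+    // For example:
+    //
+    //   namespace N { int x; }
+    //   void f() {
+    //     using namespace N;
+    //     x = 3;   // OK: unqualified lookup now finds N::x
+    //   }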
+
+ // Find enclosing context containing both using-directive and
+ // nominated namespace.
+ DeclContext *CommonAncestor = cast<DeclContext>(NS);
+ while (CommonAncestor && !CommonAncestor->Encloses(CurContext))
+ CommonAncestor = CommonAncestor->getParent();
+
+ UDir = UsingDirectiveDecl::Create(Context,
+ CurContext, UsingLoc,
+ NamespcLoc,
+ SS.getRange(),
+ (NestedNameSpecifier *)SS.getScopeRep(),
+ IdentLoc,
+ cast<NamespaceDecl>(NS),
+ CommonAncestor);
+ PushUsingDirective(S, UDir);
+ } else {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ }
+
+ // FIXME: We ignore attributes for now.
+ delete AttrList;
+ return DeclPtrTy::make(UDir);
+}
+
+void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
+  // If the scope has an associated entity, then the using-directive is at
+  // namespace or translation-unit scope. We add the UsingDirectiveDecl into
+  // its lookup structure.
+ if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity()))
+ Ctx->addDecl(Context, UDir);
+ else
+    // Otherwise it is at block scope. The using-directive will affect lookup
+    // only until the end of the scope.
+ S->PushUsingDirective(DeclPtrTy::make(UDir));
+}
+
+/// getNamespaceDecl - Returns the namespace a decl represents. If the decl
+/// is a namespace alias, returns the namespace it points to.
+static inline NamespaceDecl *getNamespaceDecl(NamedDecl *D) {
+ if (NamespaceAliasDecl *AD = dyn_cast_or_null<NamespaceAliasDecl>(D))
+ return AD->getNamespace();
+ return dyn_cast_or_null<NamespaceDecl>(D);
+}
+
+Sema::DeclPtrTy Sema::ActOnNamespaceAliasDef(Scope *S,
+ SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ const CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+
+ // Lookup the namespace name.
+ LookupResult R = LookupParsedName(S, &SS, Ident, LookupNamespaceName, false);
+
+ // Check if we have a previous declaration with the same name.
+ if (NamedDecl *PrevDecl = LookupName(S, Alias, LookupOrdinaryName, true)) {
+ if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) {
+ // We already have an alias with the same name that points to the same
+ // namespace, so don't create a new one.
+ if (!R.isAmbiguous() && AD->getNamespace() == getNamespaceDecl(R))
+ return DeclPtrTy();
+ }
+
+ unsigned DiagID = isa<NamespaceDecl>(PrevDecl) ? diag::err_redefinition :
+ diag::err_redefinition_different_kind;
+ Diag(AliasLoc, DiagID) << Alias;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return DeclPtrTy();
+ }
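+
+  // For example:
+  //
+  //   namespace N { }
+  //   namespace A = N;   // OK: creates the alias
+  //   namespace A = N;   // OK: same alias, same namespace; nothing new is created
+  //
+  //   int B;
+  //   namespace B = N;   // error: 'B' already names a different kind of symbol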
+
+ if (R.isAmbiguous()) {
+ DiagnoseAmbiguousLookup(R, Ident, IdentLoc);
+ return DeclPtrTy();
+ }
+
+ if (!R) {
+ Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange();
+ return DeclPtrTy();
+ }
+
+ NamespaceAliasDecl *AliasDecl =
+ NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc,
+ Alias, SS.getRange(),
+ (NestedNameSpecifier *)SS.getScopeRep(),
+ IdentLoc, R);
+
+ CurContext->addDecl(Context, AliasDecl);
+ return DeclPtrTy::make(AliasDecl);
+}
+
+void Sema::InitializeVarWithConstructor(VarDecl *VD,
+ CXXConstructorDecl *Constructor,
+ QualType DeclInitType,
+ Expr **Exprs, unsigned NumExprs) {
+ Expr *Temp = CXXConstructExpr::Create(Context, DeclInitType, Constructor,
+ false, Exprs, NumExprs);
+ VD->setInit(Context, Temp);
+}
+
+/// AddCXXDirectInitializerToDecl - This action is called immediately after
+/// ActOnDeclarator, when a C++ direct initializer is present.
+/// e.g: "int x(1);"
+void Sema::AddCXXDirectInitializerToDecl(DeclPtrTy Dcl,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ unsigned NumExprs = Exprs.size();
+ assert(NumExprs != 0 && Exprs.get() && "missing expressions");
+ Decl *RealDecl = Dcl.getAs<Decl>();
+
+ // If there is no declaration, there was an error parsing it. Just ignore
+ // the initializer.
+ if (RealDecl == 0)
+ return;
+
+ VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
+ if (!VDecl) {
+ Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ // FIXME: Need to handle dependent types and expressions here.
+
+ // We will treat direct-initialization as a copy-initialization:
+ // int x(1); -as-> int x = 1;
+ // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
+ //
+  // Clients that want to distinguish between the two forms can check for a
+  // direct initializer using VarDecl::hasCXXDirectInitializer().
+  // A major benefit is that clients that don't particularly care which form
+  // was used (like CodeGen) can handle both cases without special-case code.
+
+ // C++ 8.5p11:
+ // The form of initialization (using parentheses or '=') is generally
+ // insignificant, but does matter when the entity being initialized has a
+ // class type.
+ QualType DeclInitType = VDecl->getType();
+ if (const ArrayType *Array = Context.getAsArrayType(DeclInitType))
+ DeclInitType = Array->getElementType();
+
+ // FIXME: This isn't the right place to complete the type.
+ if (RequireCompleteType(VDecl->getLocation(), VDecl->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ if (VDecl->getType()->isRecordType()) {
+ CXXConstructorDecl *Constructor
+ = PerformInitializationByConstructor(DeclInitType,
+ (Expr **)Exprs.get(), NumExprs,
+ VDecl->getLocation(),
+ SourceRange(VDecl->getLocation(),
+ RParenLoc),
+ VDecl->getDeclName(),
+ IK_Direct);
+ if (!Constructor)
+ RealDecl->setInvalidDecl();
+ else {
+ VDecl->setCXXDirectInitializer(true);
+ InitializeVarWithConstructor(VDecl, Constructor, DeclInitType,
+ (Expr**)Exprs.release(), NumExprs);
+ }
+ return;
+ }
+
+ if (NumExprs > 1) {
+ Diag(CommaLocs[0], diag::err_builtin_direct_init_more_than_one_arg)
+ << SourceRange(VDecl->getLocation(), RParenLoc);
+ RealDecl->setInvalidDecl();
+ return;
+ }
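+
+  // For example:
+  //
+  //   int x(1);      // OK: direct-initialization of a builtin type
+  //   int y(1, 2);   // error: more than one initializer argument for a builtin type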
+
+ // Let clients know that initialization was done with a direct initializer.
+ VDecl->setCXXDirectInitializer(true);
+
+ assert(NumExprs == 1 && "Expected 1 expression");
+ // Set the init expression, handles conversions.
+ AddInitializerToDecl(Dcl, ExprArg(*this, Exprs.release()[0]),
+ /*DirectInit=*/true);
+}
+
+/// PerformInitializationByConstructor - Perform initialization by
+/// constructor (C++ [dcl.init]p14), which may occur as part of
+/// direct-initialization or copy-initialization. We are initializing
+/// an object of type @p ClassType with the given arguments @p
+/// Args. @p Loc is the location in the source code where the
+/// initializer occurs (e.g., a declaration, member initializer,
+/// functional cast, etc.) while @p Range covers the whole
+/// initialization. @p InitEntity is the entity being initialized,
+/// which may be the name of a declaration or a type. @p Kind is the
+/// kind of initialization we're performing, which affects whether
+/// explicit constructors will be considered. When successful, returns
+/// the constructor that will be used to perform the initialization;
+/// when the initialization fails, emits a diagnostic and returns
+/// null.
+CXXConstructorDecl *
+Sema::PerformInitializationByConstructor(QualType ClassType,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName InitEntity,
+ InitializationKind Kind) {
+ const RecordType *ClassRec = ClassType->getAsRecordType();
+ assert(ClassRec && "Can only initialize a class type here");
+
+ // C++ [dcl.init]p14:
+ //
+ // If the initialization is direct-initialization, or if it is
+ // copy-initialization where the cv-unqualified version of the
+ // source type is the same class as, or a derived class of, the
+ // class of the destination, constructors are considered. The
+ // applicable constructors are enumerated (13.3.1.3), and the
+ // best one is chosen through overload resolution (13.3). The
+ // constructor so selected is called to initialize the object,
+ // with the initializer expression(s) as its argument(s). If no
+ // constructor applies, or the overload resolution is ambiguous,
+ // the initialization is ill-formed.
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassRec->getDecl());
+ OverloadCandidateSet CandidateSet;
+
+ // Add constructors to the overload set.
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType.getUnqualifiedType()));
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = ClassDecl->lookup(Context, ConstructorName);
+ Con != ConEnd; ++Con) {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if ((Kind == IK_Direct) ||
+ (Kind == IK_Copy && Constructor->isConvertingConstructor()) ||
+ (Kind == IK_Default && Constructor->isDefaultConstructor()))
+ AddOverloadCandidate(Constructor, Args, NumArgs, CandidateSet);
+ }
+
+ // FIXME: When we decide not to synthesize the implicitly-declared
+ // constructors, we'll need to make them appear here.
+
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ // We found a constructor. Return it.
+ return cast<CXXConstructorDecl>(Best->Function);
+
+ case OR_No_Viable_Function:
+ if (InitEntity)
+ Diag(Loc, diag::err_ovl_no_viable_function_in_init)
+ << InitEntity << Range;
+ else
+ Diag(Loc, diag::err_ovl_no_viable_function_in_init)
+ << ClassType << Range;
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ return 0;
+
+ case OR_Ambiguous:
+ if (InitEntity)
+ Diag(Loc, diag::err_ovl_ambiguous_init) << InitEntity << Range;
+ else
+ Diag(Loc, diag::err_ovl_ambiguous_init) << ClassType << Range;
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return 0;
+
+ case OR_Deleted:
+ if (InitEntity)
+ Diag(Loc, diag::err_ovl_deleted_init)
+ << Best->Function->isDeleted()
+ << InitEntity << Range;
+ else
+ Diag(Loc, diag::err_ovl_deleted_init)
+ << Best->Function->isDeleted()
+ << InitEntity << Range;
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return 0;
+ }
+
+ return 0;
+}
+
+/// CompareReferenceRelationship - Compare the two types T1 and T2 to
+/// determine whether they are reference-related,
+/// reference-compatible, reference-compatible with added
+/// qualification, or incompatible, for use in C++ initialization by
+/// reference (C++ [dcl.ref.init]p4). Neither type can be a reference
+/// type, and the first type (T1) is the pointee type of the reference
+/// type being initialized.
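+///
+/// For example: with T1 = 'const int' and T2 = 'int', the types are
+/// reference-compatible with added qualification; with T1 = 'int' and
+/// T2 = 'const int', they are only reference-related (the binding would
+/// drop qualifiers); and with T1 = 'Base' and T2 = 'Derived', they are
+/// reference-compatible and DerivedToBase is set to true.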
+Sema::ReferenceCompareResult
+Sema::CompareReferenceRelationship(QualType T1, QualType T2,
+ bool& DerivedToBase) {
+ assert(!T1->isReferenceType() &&
+ "T1 must be the pointee type of the reference type");
+ assert(!T2->isReferenceType() && "T2 cannot be a reference type");
+
+ T1 = Context.getCanonicalType(T1);
+ T2 = Context.getCanonicalType(T2);
+ QualType UnqualT1 = T1.getUnqualifiedType();
+ QualType UnqualT2 = T2.getUnqualifiedType();
+
+ // C++ [dcl.init.ref]p4:
+ // Given types “cv1 T1” and “cv2 T2,” “cv1 T1” is
+ // reference-related to “cv2 T2” if T1 is the same type as T2, or
+ // T1 is a base class of T2.
+ if (UnqualT1 == UnqualT2)
+ DerivedToBase = false;
+ else if (IsDerivedFrom(UnqualT2, UnqualT1))
+ DerivedToBase = true;
+ else
+ return Ref_Incompatible;
+
+ // At this point, we know that T1 and T2 are reference-related (at
+ // least).
+
+ // C++ [dcl.init.ref]p4:
+ // "cv1 T1” is reference-compatible with “cv2 T2” if T1 is
+ // reference-related to T2 and cv1 is the same cv-qualification
+ // as, or greater cv-qualification than, cv2. For purposes of
+ // overload resolution, cases for which cv1 is greater
+ // cv-qualification than cv2 are identified as
+ // reference-compatible with added qualification (see 13.3.3.2).
+ if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
+ return Ref_Compatible;
+ else if (T1.isMoreQualifiedThan(T2))
+ return Ref_Compatible_With_Added_Qualification;
+ else
+ return Ref_Related;
+}
+
+/// CheckReferenceInit - Check the initialization of a reference
+/// variable with the given initializer (C++ [dcl.init.ref]). Init is
+/// the initializer (either a simple initializer or an initializer
+/// list), and DeclType is the type of the declaration. When ICS is
+/// non-null, this routine will compute the implicit conversion
+/// sequence according to C++ [over.ics.ref] and will not produce any
+/// diagnostics; when ICS is null, it will emit diagnostics when any
+/// errors are found. Either way, a return value of true indicates
+/// that there was a failure, a return value of false indicates that
+/// the reference initialization succeeded.
+///
+/// When @p SuppressUserConversions, user-defined conversions are
+/// suppressed.
+/// When @p AllowExplicit, we also permit explicit user-defined
+/// conversion functions.
+/// When @p ForceRValue, we unconditionally treat the initializer as an rvalue.
+bool
+Sema::CheckReferenceInit(Expr *&Init, QualType DeclType,
+ ImplicitConversionSequence *ICS,
+ bool SuppressUserConversions,
+ bool AllowExplicit, bool ForceRValue) {
+ assert(DeclType->isReferenceType() && "Reference init needs a reference");
+
+ QualType T1 = DeclType->getAsReferenceType()->getPointeeType();
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (Context.getCanonicalType(T2) == Context.OverloadTy) {
+ FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Init, DeclType,
+ ICS != 0);
+ if (Fn) {
+ // Since we're performing this reference-initialization for
+ // real, update the initializer with the resulting function.
+ if (!ICS) {
+ if (DiagnoseUseOfDecl(Fn, Init->getSourceRange().getBegin()))
+ return true;
+
+ FixOverloadedFunctionReference(Init, Fn);
+ }
+
+ T2 = Fn->getType();
+ }
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool isRValRef = DeclType->isRValueReferenceType();
+ bool DerivedToBase = false;
+ Expr::isLvalueResult InitLvalue = ForceRValue ? Expr::LV_InvalidExpression :
+ Init->isLvalue(Context);
+ ReferenceCompareResult RefRelationship
+ = CompareReferenceRelationship(T1, T2, DerivedToBase);
+
+ // Most paths end in a failed conversion.
+ if (ICS)
+ ICS->ConversionKind = ImplicitConversionSequence::BadConversion;
+
+ // C++ [dcl.init.ref]p5:
+ // A reference to type “cv1 T1” is initialized by an expression
+ // of type “cv2 T2” as follows:
+
+ // -- If the initializer expression
+
+ // Rvalue references cannot bind to lvalues (N2812).
+ // There is absolutely no situation where they can. In particular, note that
+ // this is ill-formed, even if B has a user-defined conversion to A&&:
+ // B b;
+ // A&& r = b;
+ if (isRValRef && InitLvalue == Expr::LV_Valid) {
+ if (!ICS)
+ Diag(Init->getSourceRange().getBegin(), diag::err_lvalue_to_rvalue_ref)
+ << Init->getSourceRange();
+ return true;
+ }
+
+ bool BindsDirectly = false;
+ // -- is an lvalue (but is not a bit-field), and “cv1 T1” is
+ // reference-compatible with “cv2 T2,” or
+ //
+ // Note that the bit-field check is skipped if we are just computing
+ // the implicit conversion sequence (C++ [over.best.ics]p2).
+ if (InitLvalue == Expr::LV_Valid && (ICS || !Init->getBitField()) &&
+ RefRelationship >= Ref_Compatible_With_Added_Qualification) {
+ BindsDirectly = true;
+
+ if (ICS) {
+ // C++ [over.ics.ref]p1:
+ // When a parameter of reference type binds directly (8.5.3)
+ // to an argument expression, the implicit conversion sequence
+ // is the identity conversion, unless the argument expression
+ // has a type that is a derived class of the parameter type,
+ // in which case the implicit conversion sequence is a
+ // derived-to-base Conversion (13.3.3.1).
+ ICS->ConversionKind = ImplicitConversionSequence::StandardConversion;
+ ICS->Standard.First = ICK_Identity;
+ ICS->Standard.Second = DerivedToBase? ICK_Derived_To_Base : ICK_Identity;
+ ICS->Standard.Third = ICK_Identity;
+ ICS->Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS->Standard.ToTypePtr = T1.getAsOpaquePtr();
+ ICS->Standard.ReferenceBinding = true;
+ ICS->Standard.DirectBinding = true;
+ ICS->Standard.RRefBinding = false;
+ ICS->Standard.CopyConstructor = 0;
+
+ // Nothing more to do: the inaccessibility/ambiguity check for
+ // derived-to-base conversions is suppressed when we're
+ // computing the implicit conversion sequence (C++
+ // [over.best.ics]p2).
+ return false;
+ } else {
+ // Perform the conversion.
+ // FIXME: Binding to a subobject of the lvalue is going to require more
+ // AST annotation than this.
+ ImpCastExprToType(Init, T1, /*isLvalue=*/true);
+ }
+ }
+
+ // -- has a class type (i.e., T2 is a class type) and can be
+ // implicitly converted to an lvalue of type “cv3 T3,”
+ // where “cv1 T1” is reference-compatible with “cv3 T3”
+ // 92) (this conversion is selected by enumerating the
+ // applicable conversion functions (13.3.1.6) and choosing
+ // the best one through overload resolution (13.3)),
+ if (!isRValRef && !SuppressUserConversions && T2->isRecordType()) {
+ // FIXME: Look for conversions in base classes!
+ CXXRecordDecl *T2RecordDecl
+ = dyn_cast<CXXRecordDecl>(T2->getAsRecordType()->getDecl());
+
+ OverloadCandidateSet CandidateSet;
+ OverloadedFunctionDecl *Conversions
+ = T2RecordDecl->getConversionFunctions();
+ for (OverloadedFunctionDecl::function_iterator Func
+ = Conversions->function_begin();
+ Func != Conversions->function_end(); ++Func) {
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(*Func);
+
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion.
+ if (Conv->getConversionType()->isLValueReferenceType() &&
+ (AllowExplicit || !Conv->isExplicit()))
+ AddConversionCandidate(Conv, Init, DeclType, CandidateSet);
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ // This is a direct binding.
+ BindsDirectly = true;
+
+ if (ICS) {
+ // C++ [over.ics.ref]p1:
+ //
+ // [...] If the parameter binds directly to the result of
+ // applying a conversion function to the argument
+ // expression, the implicit conversion sequence is a
+ // user-defined conversion sequence (13.3.3.1.2), with the
+ // second standard conversion sequence either an identity
+ // conversion or, if the conversion function returns an
+ // entity of a type that is a derived class of the parameter
+ // type, a derived-to-base Conversion.
+ ICS->ConversionKind = ImplicitConversionSequence::UserDefinedConversion;
+ ICS->UserDefined.Before = Best->Conversions[0].Standard;
+ ICS->UserDefined.After = Best->FinalConversion;
+ ICS->UserDefined.ConversionFunction = Best->Function;
+ assert(ICS->UserDefined.After.ReferenceBinding &&
+ ICS->UserDefined.After.DirectBinding &&
+ "Expected a direct reference binding!");
+ return false;
+ } else {
+ // Perform the conversion.
+ // FIXME: Binding to a subobject of the lvalue is going to require more
+ // AST annotation than this.
+ ImpCastExprToType(Init, T1, /*isLvalue=*/true);
+ }
+ break;
+
+ case OR_Ambiguous:
+ assert(false && "Ambiguous reference binding conversions not implemented.");
+ return true;
+
+ case OR_No_Viable_Function:
+ case OR_Deleted:
+ // There was no suitable conversion, or we found a deleted
+ // conversion; continue with other checks.
+ break;
+ }
+ }
+
+ if (BindsDirectly) {
+ // C++ [dcl.init.ref]p4:
+ // [...] In all cases where the reference-related or
+ // reference-compatible relationship of two types is used to
+ // establish the validity of a reference binding, and T1 is a
+ // base class of T2, a program that necessitates such a binding
+ // is ill-formed if T1 is an inaccessible (clause 11) or
+ // ambiguous (10.2) base class of T2.
+ //
+ // Note that we only check this condition when we're allowed to
+ // complain about errors, because we should not be checking for
+ // ambiguity (or inaccessibility) unless the reference binding
+ // actually happens.
+ if (DerivedToBase)
+ return CheckDerivedToBaseConversion(T2, T1,
+ Init->getSourceRange().getBegin(),
+ Init->getSourceRange());
+ else
+ return false;
+ }
+
+ // -- Otherwise, the reference shall be to a non-volatile const
+ // type (i.e., cv1 shall be const), or the reference shall be an
+ // rvalue reference and the initializer expression shall be an rvalue.
+ if (!isRValRef && T1.getCVRQualifiers() != QualType::Const) {
+ if (!ICS)
+ Diag(Init->getSourceRange().getBegin(),
+ diag::err_not_reference_to_const_init)
+ << T1 << (InitLvalue != Expr::LV_Valid? "temporary" : "value")
+ << T2 << Init->getSourceRange();
+ return true;
+ }
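+
+  // For example:
+  //
+  //   int &r = 42;          // error: non-const reference cannot bind to an rvalue
+  //   const int &cr = 42;   // OK: handled by the copy-initialization path below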
+
+ // -- If the initializer expression is an rvalue, with T2 a
+ // class type, and “cv1 T1” is reference-compatible with
+ // “cv2 T2,” the reference is bound in one of the
+ // following ways (the choice is implementation-defined):
+ //
+ // -- The reference is bound to the object represented by
+ // the rvalue (see 3.10) or to a sub-object within that
+ // object.
+ //
+ // -- A temporary of type “cv1 T2” [sic] is created, and
+ // a constructor is called to copy the entire rvalue
+ // object into the temporary. The reference is bound to
+ // the temporary or to a sub-object within the
+ // temporary.
+ //
+ // The constructor that would be used to make the copy
+ // shall be callable whether or not the copy is actually
+ // done.
+ //
+ // Note that C++0x [dcl.init.ref]p5 takes away this implementation
+ // freedom, so we will always take the first option and never build
+ // a temporary in this case. FIXME: We will, however, have to check
+ // for the presence of a copy constructor in C++98/03 mode.
+ if (InitLvalue != Expr::LV_Valid && T2->isRecordType() &&
+ RefRelationship >= Ref_Compatible_With_Added_Qualification) {
+ if (ICS) {
+ ICS->ConversionKind = ImplicitConversionSequence::StandardConversion;
+ ICS->Standard.First = ICK_Identity;
+ ICS->Standard.Second = DerivedToBase? ICK_Derived_To_Base : ICK_Identity;
+ ICS->Standard.Third = ICK_Identity;
+ ICS->Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS->Standard.ToTypePtr = T1.getAsOpaquePtr();
+ ICS->Standard.ReferenceBinding = true;
+ ICS->Standard.DirectBinding = false;
+ ICS->Standard.RRefBinding = isRValRef;
+ ICS->Standard.CopyConstructor = 0;
+ } else {
+ // FIXME: Binding to a subobject of the rvalue is going to require more
+ // AST annotation than this.
+ ImpCastExprToType(Init, T1, /*isLvalue=*/false);
+ }
+ return false;
+ }
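+
+  // For example:
+  //
+  //   struct X { };
+  //   X make();
+  //   const X &r = make();   // bound to the returned object itself; no extra
+  //                          // temporary is built here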
+
+ // -- Otherwise, a temporary of type “cv1 T1” is created and
+ // initialized from the initializer expression using the
+ // rules for a non-reference copy initialization (8.5). The
+ // reference is then bound to the temporary. If T1 is
+ // reference-related to T2, cv1 must be the same
+ // cv-qualification as, or greater cv-qualification than,
+ // cv2; otherwise, the program is ill-formed.
+ if (RefRelationship == Ref_Related) {
+ // If cv1 == cv2 or cv1 is a greater cv-qualified than cv2, then
+ // we would be reference-compatible or reference-compatible with
+ // added qualification. But that wasn't the case, so the reference
+ // initialization fails.
+ if (!ICS)
+ Diag(Init->getSourceRange().getBegin(),
+ diag::err_reference_init_drops_quals)
+ << T1 << (InitLvalue != Expr::LV_Valid? "temporary" : "value")
+ << T2 << Init->getSourceRange();
+ return true;
+ }
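+
+  // For example:
+  //
+  //   volatile int vi = 0;
+  //   const int &r = vi;   // error: the binding would drop the 'volatile' qualifier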
+
+ // If at least one of the types is a class type, the types are not
+ // related, and we aren't allowed any user conversions, the
+ // reference binding fails. This case is important for breaking
+ // recursion, since TryImplicitConversion below will attempt to
+ // create a temporary through the use of a copy constructor.
+ if (SuppressUserConversions && RefRelationship == Ref_Incompatible &&
+ (T1->isRecordType() || T2->isRecordType())) {
+ if (!ICS)
+ Diag(Init->getSourceRange().getBegin(),
+ diag::err_typecheck_convert_incompatible)
+ << DeclType << Init->getType() << "initializing" << Init->getSourceRange();
+ return true;
+ }
+
+ // Actually try to convert the initializer to T1.
+ if (ICS) {
+ // C++ [over.ics.ref]p2:
+ //
+ // When a parameter of reference type is not bound directly to
+ // an argument expression, the conversion sequence is the one
+ // required to convert the argument expression to the
+ // underlying type of the reference according to
+ // 13.3.3.1. Conceptually, this conversion sequence corresponds
+ // to copy-initializing a temporary of the underlying type with
+ // the argument expression. Any difference in top-level
+ // cv-qualification is subsumed by the initialization itself
+ // and does not constitute a conversion.
+ *ICS = TryImplicitConversion(Init, T1, SuppressUserConversions);
+ // Of course, that's still a reference binding.
+ if (ICS->ConversionKind == ImplicitConversionSequence::StandardConversion) {
+ ICS->Standard.ReferenceBinding = true;
+ ICS->Standard.RRefBinding = isRValRef;
+  } else if (ICS->ConversionKind ==
+ ImplicitConversionSequence::UserDefinedConversion) {
+ ICS->UserDefined.After.ReferenceBinding = true;
+ ICS->UserDefined.After.RRefBinding = isRValRef;
+ }
+ return ICS->ConversionKind == ImplicitConversionSequence::BadConversion;
+ } else {
+ return PerformImplicitConversion(Init, T1, "initializing");
+ }
+}
+
+/// CheckOverloadedOperatorDeclaration - Check whether the declaration
+/// of this overloaded operator is well-formed. If so, returns false;
+/// otherwise, emits appropriate diagnostics and returns true.
+bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
+ assert(FnDecl && FnDecl->isOverloadedOperator() &&
+ "Expected an overloaded operator declaration");
+
+ OverloadedOperatorKind Op = FnDecl->getOverloadedOperator();
+
+ // C++ [over.oper]p5:
+ // The allocation and deallocation functions, operator new,
+ // operator new[], operator delete and operator delete[], are
+ // described completely in 3.7.3. The attributes and restrictions
+ // found in the rest of this subclause do not apply to them unless
+ // explicitly stated in 3.7.3.
+ // FIXME: Write a separate routine for checking this. For now, just allow it.
+ if (Op == OO_New || Op == OO_Array_New ||
+ Op == OO_Delete || Op == OO_Array_Delete)
+ return false;
+
+ // C++ [over.oper]p6:
+ // An operator function shall either be a non-static member
+ // function or be a non-member function and have at least one
+ // parameter whose type is a class, a reference to a class, an
+ // enumeration, or a reference to an enumeration.
+ if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (MethodDecl->isStatic())
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_static) << FnDecl->getDeclName();
+ } else {
+ bool ClassOrEnumParam = false;
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
+ ParamEnd = FnDecl->param_end();
+ Param != ParamEnd; ++Param) {
+ QualType ParamType = (*Param)->getType().getNonReferenceType();
+ if (ParamType->isRecordType() || ParamType->isEnumeralType()) {
+ ClassOrEnumParam = true;
+ break;
+ }
+ }
+
+ if (!ClassOrEnumParam)
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_needs_class_or_enum)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.oper]p8:
+ // An operator function cannot have default arguments (8.3.6),
+ // except where explicitly stated below.
+ //
+ // Only the function-call operator allows default arguments
+ // (C++ [over.call]p1).
+ if (Op != OO_Call) {
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin();
+ Param != FnDecl->param_end(); ++Param) {
+ if ((*Param)->hasUnparsedDefaultArg())
+ return Diag((*Param)->getLocation(),
+ diag::err_operator_overload_default_arg)
+ << FnDecl->getDeclName();
+ else if (Expr *DefArg = (*Param)->getDefaultArg())
+ return Diag((*Param)->getLocation(),
+ diag::err_operator_overload_default_arg)
+ << FnDecl->getDeclName() << DefArg->getSourceRange();
+ }
+ }
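+
+  // For example:
+  //
+  //   struct X {
+  //     X operator+(int i = 0);      // error: default argument not allowed here
+  //     int operator()(int i = 0);   // OK: operator() may have default arguments
+  //   };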
+
+ static const bool OperatorUses[NUM_OVERLOADED_OPERATORS][3] = {
+ { false, false, false }
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ , { Unary, Binary, MemberOnly }
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ bool CanBeUnaryOperator = OperatorUses[Op][0];
+ bool CanBeBinaryOperator = OperatorUses[Op][1];
+ bool MustBeMemberOperator = OperatorUses[Op][2];
+
+ // C++ [over.oper]p8:
+ // [...] Operator functions cannot have more or fewer parameters
+ // than the number required for the corresponding operator, as
+ // described in the rest of this subclause.
+ unsigned NumParams = FnDecl->getNumParams()
+ + (isa<CXXMethodDecl>(FnDecl)? 1 : 0);
+ if (Op != OO_Call &&
+ ((NumParams == 1 && !CanBeUnaryOperator) ||
+ (NumParams == 2 && !CanBeBinaryOperator) ||
+ (NumParams < 1) || (NumParams > 2))) {
+ // We have the wrong number of parameters.
+ unsigned ErrorKind;
+ if (CanBeUnaryOperator && CanBeBinaryOperator) {
+ ErrorKind = 2; // 2 -> unary or binary.
+ } else if (CanBeUnaryOperator) {
+ ErrorKind = 0; // 0 -> unary
+ } else {
+ assert(CanBeBinaryOperator &&
+ "All non-call overloaded operators are unary or binary!");
+ ErrorKind = 1; // 1 -> binary
+ }
+
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be)
+ << FnDecl->getDeclName() << NumParams << ErrorKind;
+ }
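+
+  // For example:
+  //
+  //   struct X { };
+  //   bool operator!(X, X);    // error: operator! must be a unary operator
+  //   X operator+(X, X, X);    // error: operator+ must be a unary or binary operator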
+
+ // Overloaded operators other than operator() cannot be variadic.
+ if (Op != OO_Call &&
+ FnDecl->getType()->getAsFunctionProtoType()->isVariadic()) {
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic)
+ << FnDecl->getDeclName();
+ }
+
+ // Some operators must be non-static member functions.
+ if (MustBeMemberOperator && !isa<CXXMethodDecl>(FnDecl)) {
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_must_be_member)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.inc]p1:
+ // The user-defined function called operator++ implements the
+ // prefix and postfix ++ operator. If this function is a member
+ // function with no parameters, or a non-member function with one
+ // parameter of class or enumeration type, it defines the prefix
+ // increment operator ++ for objects of that type. If the function
+ // is a member function with one parameter (which shall be of type
+ // int) or a non-member function with two parameters (the second
+ // of which shall be of type int), it defines the postfix
+ // increment operator ++ for objects of that type.
+ if ((Op == OO_PlusPlus || Op == OO_MinusMinus) && NumParams == 2) {
+ ParmVarDecl *LastParam = FnDecl->getParamDecl(FnDecl->getNumParams() - 1);
+ bool ParamIsInt = false;
+ if (const BuiltinType *BT = LastParam->getType()->getAsBuiltinType())
+ ParamIsInt = BT->getKind() == BuiltinType::Int;
+
+ if (!ParamIsInt)
+ return Diag(LastParam->getLocation(),
+ diag::err_operator_overload_post_incdec_must_be_int)
+ << LastParam->getType() << (Op == OO_MinusMinus);
+ }
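+
+  // For example:
+  //
+  //   struct X {
+  //     X &operator++();        // OK: prefix increment
+  //     X operator++(int);      // OK: postfix increment
+  //     X operator++(float);    // error: parameter of postfix ++ must be 'int'
+  //   };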
+
+ // Notify the class if it got an assignment operator.
+ if (Op == OO_Equal) {
+ // Would have returned earlier otherwise.
+ assert(isa<CXXMethodDecl>(FnDecl) &&
+ "Overloaded = not member, but not filtered.");
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
+ Method->getParent()->addedAssignmentOperator(Context, Method);
+ }
+
+ return false;
+}
+
+/// ActOnStartLinkageSpecification - Parsed the beginning of a C++
+/// linkage specification, including the language and (if present)
+/// the '{'. ExternLoc is the location of the 'extern', LangLoc is
+/// the location of the language string literal, which is provided
+/// by Lang/StrSize. LBraceLoc, if valid, provides the location of
+/// the '{' brace. Otherwise, this linkage specification does not
+/// have any braces.
+Sema::DeclPtrTy Sema::ActOnStartLinkageSpecification(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ const char *Lang,
+ unsigned StrSize,
+ SourceLocation LBraceLoc) {
+ LinkageSpecDecl::LanguageIDs Language;
+ if (strncmp(Lang, "\"C\"", StrSize) == 0)
+ Language = LinkageSpecDecl::lang_c;
+ else if (strncmp(Lang, "\"C++\"", StrSize) == 0)
+ Language = LinkageSpecDecl::lang_cxx;
+ else {
+ Diag(LangLoc, diag::err_bad_language);
+ return DeclPtrTy();
+ }
+
+ // FIXME: Add all the various semantics of linkage specifications
+
+ LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext,
+ LangLoc, Language,
+ LBraceLoc.isValid());
+ CurContext->addDecl(Context, D);
+ PushDeclContext(S, D);
+ return DeclPtrTy::make(D);
+}
+
+/// ActOnFinishLinkageSpecification - Completes the definition of
+/// the C++ linkage specification LinkageSpec. If RBraceLoc is
+/// valid, it's the position of the closing '}' brace in a linkage
+/// specification that uses braces.
+Sema::DeclPtrTy Sema::ActOnFinishLinkageSpecification(Scope *S,
+ DeclPtrTy LinkageSpec,
+ SourceLocation RBraceLoc) {
+ if (LinkageSpec)
+ PopDeclContext();
+ return LinkageSpec;
+}
+
+/// \brief Perform semantic analysis for the variable declaration that
+/// occurs within a C++ catch clause, returning the newly-created
+/// variable.
+VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType,
+ IdentifierInfo *Name,
+ SourceLocation Loc,
+ SourceRange Range) {
+ bool Invalid = false;
+
+ // Arrays and functions decay.
+ if (ExDeclType->isArrayType())
+ ExDeclType = Context.getArrayDecayedType(ExDeclType);
+ else if (ExDeclType->isFunctionType())
+ ExDeclType = Context.getPointerType(ExDeclType);
+
+ // C++ 15.3p1: The exception-declaration shall not denote an incomplete type.
+ // The exception-declaration shall not denote a pointer or reference to an
+ // incomplete type, other than [cv] void*.
+ // N2844 forbids rvalue references.
+  if (!ExDeclType->isDependentType() && ExDeclType->isRValueReferenceType()) {
+ Diag(Loc, diag::err_catch_rvalue_ref) << Range;
+ Invalid = true;
+ }
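+
+  // For example:
+  //
+  //   try { throw 1; } catch (int &&e) { }   // error: cannot catch by rvalue reference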
+
+ QualType BaseType = ExDeclType;
+ int Mode = 0; // 0 for direct type, 1 for pointer, 2 for reference
+ unsigned DK = diag::err_catch_incomplete;
+ if (const PointerType *Ptr = BaseType->getAsPointerType()) {
+ BaseType = Ptr->getPointeeType();
+ Mode = 1;
+ DK = diag::err_catch_incomplete_ptr;
+  } else if (const ReferenceType *Ref = BaseType->getAsReferenceType()) {
+ // For the purpose of error recovery, we treat rvalue refs like lvalue refs.
+ BaseType = Ref->getPointeeType();
+ Mode = 2;
+ DK = diag::err_catch_incomplete_ref;
+ }
+ if (!Invalid && (Mode == 0 || !BaseType->isVoidType()) &&
+ !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
+ Invalid = true;
+
+ if (!Invalid && !ExDeclType->isDependentType() &&
+ RequireNonAbstractType(Loc, ExDeclType,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ Invalid = true;
+
+ // FIXME: Need to test for ability to copy-construct and destroy the
+ // exception variable.
+
+ // FIXME: Need to check for abstract classes.
+
+ VarDecl *ExDecl = VarDecl::Create(Context, CurContext, Loc,
+ Name, ExDeclType, VarDecl::None,
+ Range.getBegin());
+
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ return ExDecl;
+}
+
+/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
+/// handler.
+Sema::DeclPtrTy Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
+ QualType ExDeclType = GetTypeForDeclarator(D, S);
+
+ bool Invalid = D.isInvalidType();
+ IdentifierInfo *II = D.getIdentifier();
+ if (NamedDecl *PrevDecl = LookupName(S, II, LookupOrdinaryName)) {
+ // The scope should be freshly made just for us. There is just no way
+ // it contains any previous declaration.
+ assert(!S->isDeclScope(DeclPtrTy::make(PrevDecl)));
+ if (PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ }
+ }
+
+ if (D.getCXXScopeSpec().isSet() && !Invalid) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_catch_declarator)
+ << D.getCXXScopeSpec().getRange();
+ Invalid = true;
+ }
+
+ VarDecl *ExDecl = BuildExceptionDeclaration(S, ExDeclType,
+ D.getIdentifier(),
+ D.getIdentifierLoc(),
+ D.getDeclSpec().getSourceRange());
+
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ // Add the exception declaration into this scope.
+ if (II)
+ PushOnScopeChains(ExDecl, S);
+ else
+ CurContext->addDecl(Context, ExDecl);
+
+ ProcessDeclAttributes(ExDecl, D);
+ return DeclPtrTy::make(ExDecl);
+}
+
+Sema::DeclPtrTy Sema::ActOnStaticAssertDeclaration(SourceLocation AssertLoc,
+ ExprArg assertexpr,
+ ExprArg assertmessageexpr) {
+ Expr *AssertExpr = (Expr *)assertexpr.get();
+ StringLiteral *AssertMessage =
+ cast<StringLiteral>((Expr *)assertmessageexpr.get());
+
+ if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent()) {
+ llvm::APSInt Value(32);
+ if (!AssertExpr->isIntegerConstantExpr(Value, Context)) {
+ Diag(AssertLoc, diag::err_static_assert_expression_is_not_constant) <<
+ AssertExpr->getSourceRange();
+ return DeclPtrTy();
+ }
+
+ if (Value == 0) {
+ std::string str(AssertMessage->getStrData(),
+ AssertMessage->getByteLength());
+ Diag(AssertLoc, diag::err_static_assert_failed)
+ << str << AssertExpr->getSourceRange();
+ }
+ }
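+
+  // For example:
+  //
+  //   static_assert(sizeof(void*) >= 4, "pointer too small");   // OK on common targets
+  //   static_assert(1 + 1 == 3, "arithmetic is broken");        // error: assertion failed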
+
+ assertexpr.release();
+ assertmessageexpr.release();
+ Decl *Decl = StaticAssertDecl::Create(Context, CurContext, AssertLoc,
+ AssertExpr, AssertMessage);
+
+ CurContext->addDecl(Context, Decl);
+ return DeclPtrTy::make(Decl);
+}
+
+bool Sema::ActOnFriendDecl(Scope *S, SourceLocation FriendLoc, DeclPtrTy Dcl) {
+ if (!(S->getFlags() & Scope::ClassScope)) {
+ Diag(FriendLoc, diag::err_friend_decl_outside_class);
+ return true;
+ }
+
+ return false;
+}
+
+void Sema::SetDeclDeleted(DeclPtrTy dcl, SourceLocation DelLoc) {
+ Decl *Dcl = dcl.getAs<Decl>();
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(Dcl);
+ if (!Fn) {
+ Diag(DelLoc, diag::err_deleted_non_function);
+ return;
+ }
+ if (const FunctionDecl *Prev = Fn->getPreviousDeclaration()) {
+ Diag(DelLoc, diag::err_deleted_decl_not_first);
+ Diag(Prev->getLocation(), diag::note_previous_declaration);
+ // If the declaration wasn't the first, we delete the function anyway for
+ // recovery.
+ }
+ Fn->setDeleted();
+}
+
+static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
+ for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E;
+ ++CI) {
+ Stmt *SubStmt = *CI;
+ if (!SubStmt)
+ continue;
+ if (isa<ReturnStmt>(SubStmt))
+ Self.Diag(SubStmt->getSourceRange().getBegin(),
+ diag::err_return_in_constructor_handler);
+ if (!isa<Expr>(SubStmt))
+ SearchForReturnInStmt(Self, SubStmt);
+ }
+}
+
+void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
+ for (unsigned I = 0, E = TryBlock->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *Handler = TryBlock->getHandler(I);
+ SearchForReturnInStmt(*this, Handler);
+ }
+}
+
+bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ QualType NewTy = New->getType()->getAsFunctionType()->getResultType();
+ QualType OldTy = Old->getType()->getAsFunctionType()->getResultType();
+
+ QualType CNewTy = Context.getCanonicalType(NewTy);
+ QualType COldTy = Context.getCanonicalType(OldTy);
+
+ if (CNewTy == COldTy &&
+ CNewTy.getCVRQualifiers() == COldTy.getCVRQualifiers())
+ return false;
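+
+  // For example:
+  //
+  //   struct B { virtual B *clone(); };
+  //   struct D1 : B { D1 *clone(); };   // OK: covariant return type
+  //   struct D2 : B { int clone(); };   // error: return type is not covariant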
+
+ // Check if the return types are covariant
+ QualType NewClassTy, OldClassTy;
+
+  // Both types must be pointers or references to classes.
+ if (PointerType *NewPT = dyn_cast<PointerType>(NewTy)) {
+ if (PointerType *OldPT = dyn_cast<PointerType>(OldTy)) {
+ NewClassTy = NewPT->getPointeeType();
+ OldClassTy = OldPT->getPointeeType();
+ }
+ } else if (ReferenceType *NewRT = dyn_cast<ReferenceType>(NewTy)) {
+ if (ReferenceType *OldRT = dyn_cast<ReferenceType>(OldTy)) {
+ NewClassTy = NewRT->getPointeeType();
+ OldClassTy = OldRT->getPointeeType();
+ }
+ }
+
+  // The return types are not both pointers or references to class types.
+ if (NewClassTy.isNull()) {
+ Diag(New->getLocation(),
+ diag::err_different_return_type_for_overriding_virtual_function)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+
+ return true;
+ }
+
+ if (NewClassTy.getUnqualifiedType() != OldClassTy.getUnqualifiedType()) {
+ // Check if the new class derives from the old class.
+ if (!IsDerivedFrom(NewClassTy, OldClassTy)) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_not_derived)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+
+  // Check if the conversion from derived to base is valid.
+ if (CheckDerivedToBaseConversion(NewClassTy, OldClassTy,
+ diag::err_covariant_return_inaccessible_base,
+ diag::err_covariant_return_ambiguous_derived_to_base_conv,
+ // FIXME: Should this point to the return type?
+ New->getLocation(), SourceRange(), New->getDeclName())) {
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+ }
+
+ // The qualifiers of the return types must be the same.
+ if (CNewTy.getCVRQualifiers() != COldTy.getCVRQualifiers()) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_different_qualifications)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+  }
+
+  // The new class type must have the same or fewer qualifiers than the old type.
+ if (NewClassTy.isMoreQualifiedThan(OldClassTy)) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_class_type_more_qualified)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+  }
+
+ return false;
+}
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
new file mode 100644
index 0000000..8f58034
--- /dev/null
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -0,0 +1,2166 @@
+//===--- SemaDeclObjC.cpp - Semantic Analysis for ObjC Declarations -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective C declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Parse/DeclSpec.h"
+using namespace clang;
+
+bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
+ ObjCMethodDecl *GetterMethod,
+ SourceLocation Loc) {
+ if (GetterMethod &&
+ GetterMethod->getResultType() != property->getType()) {
+ AssignConvertType result = Incompatible;
+ if (Context.isObjCObjectPointerType(property->getType()))
+ result = CheckAssignmentConstraints(GetterMethod->getResultType(), property->getType());
+ if (result != Compatible) {
+ Diag(Loc, diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << GetterMethod->getSelector();
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ }
+ }
+ return false;
+}
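+
+// Illustrative sketch (hypothetical declarations): a user-declared getter whose
+// result type does not match the property type should trigger the warning above,
+// e.g.
+//
+//   @interface Foo
+//   - (int)name;                      // user-declared getter returns 'int'
+//   @property (copy) NSString *name;  // but the property type is 'NSString *'
+//   @end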
+
+/// ActOnStartOfObjCMethodDef - This routine sets up the parameters, both
+/// invisible and user-declared, in the method definition's AST.
+void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, DeclPtrTy D) {
+ assert(getCurMethodDecl() == 0 && "Method parsing confused");
+ ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D.getAs<Decl>());
+
+ // If we don't have a valid method decl, simply return.
+ if (!MDecl)
+ return;
+
+ CurFunctionNeedsScopeChecking = false;
+
+ // Allow the rest of sema to find private method decl implementations.
+ if (MDecl->isInstanceMethod())
+ AddInstanceMethodToGlobalPool(MDecl);
+ else
+ AddFactoryMethodToGlobalPool(MDecl);
+
+ // Allow all of Sema to see that we are entering a method definition.
+ PushDeclContext(FnBodyScope, MDecl);
+
+ // Create Decl objects for each parameter, entering them in the scope for
+ // binding to their use.
+
+ // Insert the invisible arguments, self and _cmd!
+ MDecl->createImplicitParams(Context, MDecl->getClassInterface());
+
+ PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope);
+ PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope);
+
+ // Introduce all of the other parameters into this scope.
+ for (ObjCMethodDecl::param_iterator PI = MDecl->param_begin(),
+ E = MDecl->param_end(); PI != E; ++PI)
+ if ((*PI)->getIdentifier())
+ PushOnScopeChains(*PI, FnBodyScope);
+}
+
+Sema::DeclPtrTy Sema::
+ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperName, SourceLocation SuperLoc,
+ const DeclPtrTy *ProtoRefs, unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc, AttributeList *AttrList) {
+ assert(ClassName && "Missing class identifier");
+
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl = LookupName(TUScope, ClassName, LookupOrdinaryName);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(ClassLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+
+ ObjCInterfaceDecl* IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (IDecl) {
+ // Class already seen. Is it a forward declaration?
+ if (!IDecl->isForwardDecl()) {
+ IDecl->setInvalidDecl();
+ Diag(AtInterfaceLoc, diag::err_duplicate_class_def)<<IDecl->getDeclName();
+ Diag(IDecl->getLocation(), diag::note_previous_definition);
+
+ // Return the previous class interface.
+ // FIXME: don't leak the objects passed in!
+ return DeclPtrTy::make(IDecl);
+ } else {
+ IDecl->setLocation(AtInterfaceLoc);
+ IDecl->setForwardDecl(false);
+ }
+ } else {
+ IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassName, ClassLoc);
+ if (AttrList)
+ ProcessDeclAttributeList(IDecl, AttrList);
+
+ PushOnScopeChains(IDecl, TUScope);
+ }
+
+ if (SuperName) {
+ // Check if a different kind of symbol is declared in this scope.
+ PrevDecl = LookupName(TUScope, SuperName, LookupOrdinaryName);
+
+ ObjCInterfaceDecl *SuperClassDecl =
+ dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+
+ // Diagnose classes that inherit from deprecated classes.
+ if (SuperClassDecl)
+ (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc);
+
+ if (PrevDecl && SuperClassDecl == 0) {
+ // The previous declaration was not a class decl. Check if we have a
+ // typedef. If we do, get the underlying class type.
+ if (const TypedefDecl *TDecl = dyn_cast_or_null<TypedefDecl>(PrevDecl)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCInterfaceType()) {
+ if (NamedDecl *IDecl = T->getAsObjCInterfaceType()->getDecl())
+ SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl);
+ }
+ }
+
+ // This handles the following case:
+ //
+ // typedef int SuperClass;
+ // @interface MyClass : SuperClass {} @end
+ //
+ if (!SuperClassDecl) {
+ Diag(SuperLoc, diag::err_redefinition_different_kind) << SuperName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ if (!dyn_cast_or_null<TypedefDecl>(PrevDecl)) {
+ if (!SuperClassDecl)
+ Diag(SuperLoc, diag::err_undef_superclass)
+ << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
+ else if (SuperClassDecl->isForwardDecl())
+ Diag(SuperLoc, diag::err_undef_superclass)
+ << SuperClassDecl->getDeclName() << ClassName
+ << SourceRange(AtInterfaceLoc, ClassLoc);
+ }
+ IDecl->setSuperClass(SuperClassDecl);
+ IDecl->setSuperClassLoc(SuperLoc);
+ IDecl->setLocEnd(SuperLoc);
+ } else { // we have a root class.
+ IDecl->setLocEnd(ClassLoc);
+ }
+
+ /// Check then save referenced protocols.
+ if (NumProtoRefs) {
+ IDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ Context);
+ IDecl->setLocEnd(EndProtoLoc);
+ }
+
+ CheckObjCDeclScope(IDecl);
+ return DeclPtrTy::make(IDecl);
+}
+
+/// ActOnCompatiblityAlias - This action is called after complete parsing of an
+/// @compatibility_alias declaration. It sets up the alias relationships.
+Sema::DeclPtrTy Sema::ActOnCompatiblityAlias(SourceLocation AtLoc,
+ IdentifierInfo *AliasName,
+ SourceLocation AliasLocation,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLocation) {
+ // Look for previous declaration of alias name
+ NamedDecl *ADecl = LookupName(TUScope, AliasName, LookupOrdinaryName);
+ if (ADecl) {
+ if (isa<ObjCCompatibleAliasDecl>(ADecl))
+ Diag(AliasLocation, diag::warn_previous_alias_decl);
+ else
+ Diag(AliasLocation, diag::err_conflicting_aliasing_type) << AliasName;
+ Diag(ADecl->getLocation(), diag::note_previous_declaration);
+ return DeclPtrTy();
+ }
+ // Check for class declaration
+ NamedDecl *CDeclU = LookupName(TUScope, ClassName, LookupOrdinaryName);
+ if (const TypedefDecl *TDecl = dyn_cast_or_null<TypedefDecl>(CDeclU)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCInterfaceType()) {
+ if (NamedDecl *IDecl = T->getAsObjCInterfaceType()->getDecl()) {
+ ClassName = IDecl->getIdentifier();
+ CDeclU = LookupName(TUScope, ClassName, LookupOrdinaryName);
+ }
+ }
+ }
+ ObjCInterfaceDecl *CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDeclU);
+ if (CDecl == 0) {
+ Diag(ClassLocation, diag::warn_undef_interface) << ClassName;
+ if (CDeclU)
+ Diag(CDeclU->getLocation(), diag::note_previous_declaration);
+ return DeclPtrTy();
+ }
+
+ // Everything checked out, instantiate a new alias declaration AST.
+ ObjCCompatibleAliasDecl *AliasDecl =
+ ObjCCompatibleAliasDecl::Create(Context, CurContext, AtLoc, AliasName, CDecl);
+
+ if (!CheckObjCDeclScope(AliasDecl))
+ PushOnScopeChains(AliasDecl, TUScope);
+
+ return DeclPtrTy::make(AliasDecl);
+}
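+
+// A minimal sketch of what this action handles (class names are hypothetical):
+//
+//   @interface Widget @end
+//   @compatibility_alias OldWidget Widget;  // 'OldWidget' now refers to 'Widget'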
+
+void Sema::CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName,
+ SourceLocation &Ploc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList)
+{
+ for (ObjCList<ObjCProtocolDecl>::iterator I = PList.begin(),
+ E = PList.end(); I != E; ++I) {
+
+ if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier())) {
+ if (PDecl->getIdentifier() == PName) {
+ Diag(Ploc, diag::err_protocol_has_circular_dependency);
+ Diag(PrevLoc, diag::note_previous_definition);
+ }
+ CheckForwardProtocolDeclarationForCircularDependency(PName, Ploc,
+ PDecl->getLocation(), PDecl->getReferencedProtocols());
+ }
+ }
+}
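+
+// Sketch of the circular dependency detected above (hypothetical protocols):
+//
+//   @protocol B;
+//   @protocol A <B> @end
+//   @protocol B <A> @end  // err_protocol_has_circular_dependency on 'B'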
+
+Sema::DeclPtrTy
+Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
+ IdentifierInfo *ProtocolName,
+ SourceLocation ProtocolLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList) {
+ // FIXME: Deal with AttrList.
+ assert(ProtocolName && "Missing protocol identifier");
+ ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolName);
+ if (PDecl) {
+ // Protocol already seen. Better be a forward protocol declaration
+ if (!PDecl->isForwardDecl()) {
+ Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
+ Diag(PDecl->getLocation(), diag::note_previous_definition);
+ // Just return the protocol we already had.
+ // FIXME: don't leak the objects passed in!
+ return DeclPtrTy::make(PDecl);
+ }
+ ObjCList<ObjCProtocolDecl> PList;
+ PList.set((ObjCProtocolDecl *const*)ProtoRefs, NumProtoRefs, Context);
+ CheckForwardProtocolDeclarationForCircularDependency(
+ ProtocolName, ProtocolLoc, PDecl->getLocation(), PList);
+ PList.Destroy(Context);
+
+ // Make sure the cached decl gets a valid start location.
+ PDecl->setLocation(AtProtoInterfaceLoc);
+ PDecl->setForwardDecl(false);
+ } else {
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext,
+ AtProtoInterfaceLoc,ProtocolName);
+ PushOnScopeChains(PDecl, TUScope);
+ PDecl->setForwardDecl(false);
+ }
+ if (AttrList)
+ ProcessDeclAttributeList(PDecl, AttrList);
+ if (NumProtoRefs) {
+ /// Check then save referenced protocols.
+ PDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,Context);
+ PDecl->setLocEnd(EndProtoLoc);
+ }
+
+ CheckObjCDeclScope(PDecl);
+ return DeclPtrTy::make(PDecl);
+}
+
+/// FindProtocolDeclaration - This routine looks up protocols and
+/// issues an error if they are not declared. It returns the list of
+/// protocol declarations in its 'Protocols' argument.
+void
+Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
+ const IdentifierLocPair *ProtocolId,
+ unsigned NumProtocols,
+ llvm::SmallVectorImpl<DeclPtrTy> &Protocols) {
+ for (unsigned i = 0; i != NumProtocols; ++i) {
+ ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first);
+ if (!PDecl) {
+ Diag(ProtocolId[i].second, diag::err_undeclared_protocol)
+ << ProtocolId[i].first;
+ continue;
+ }
+
+ (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second);
+
+ // If this is a forward declaration and we are supposed to warn in this
+ // case, do it.
+ if (WarnOnDeclarations && PDecl->isForwardDecl())
+ Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
+ << ProtocolId[i].first;
+ Protocols.push_back(DeclPtrTy::make(PDecl));
+ }
+}
+
+/// DiagnosePropertyMismatch - Compares two properties for their
+/// attributes and types and warns on a variety of inconsistencies.
+///
+void
+Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *inheritedName) {
+ ObjCPropertyDecl::PropertyAttributeKind CAttr =
+ Property->getPropertyAttributes();
+ ObjCPropertyDecl::PropertyAttributeKind SAttr =
+ SuperProperty->getPropertyAttributes();
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly)
+ && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite))
+ Diag(Property->getLocation(), diag::warn_readonly_property)
+ << Property->getDeclName() << inheritedName;
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "copy" << inheritedName;
+ else if ((CAttr & ObjCPropertyDecl::OBJC_PR_retain)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_retain))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "retain" << inheritedName;
+
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_nonatomic))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "atomic" << inheritedName;
+ if (Property->getSetterName() != SuperProperty->getSetterName())
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "setter" << inheritedName;
+ if (Property->getGetterName() != SuperProperty->getGetterName())
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "getter" << inheritedName;
+
+ QualType LHSType =
+ Context.getCanonicalType(SuperProperty->getType());
+ QualType RHSType =
+ Context.getCanonicalType(Property->getType());
+
+ if (!Context.typesAreCompatible(LHSType, RHSType)) {
+ // FIXME: Incorporate this test with typesAreCompatible.
+ if (LHSType->isObjCQualifiedIdType() && RHSType->isObjCQualifiedIdType())
+ if (ObjCQualifiedIdTypesAreCompatible(LHSType, RHSType, false))
+ return;
+ Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
+ << Property->getType() << SuperProperty->getType() << inheritedName;
+ }
+}
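+
+// A sketch of one mismatch warned about above (hypothetical classes): a subclass
+// narrowing an inherited readwrite property to readonly.
+//
+//   @interface Base
+//   @property (readwrite, copy) NSString *title;
+//   @end
+//   @interface Sub : Base
+//   @property (readonly, copy) NSString *title;  // warn_readonly_property
+//   @end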
+
+/// ComparePropertiesInBaseAndSuper - This routine compares property
+/// declarations in a base class and its super class, if any, and issues
+/// diagnostics in a variety of inconsistent situations.
+///
+void Sema::ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl) {
+ ObjCInterfaceDecl *SDecl = IDecl->getSuperClass();
+ if (!SDecl)
+ return;
+ // FIXME: O(N^2)
+ for (ObjCInterfaceDecl::prop_iterator S = SDecl->prop_begin(Context),
+ E = SDecl->prop_end(Context); S != E; ++S) {
+ ObjCPropertyDecl *SuperPDecl = (*S);
+ // Does a property in the super class have a declaration in the current class?
+ for (ObjCInterfaceDecl::prop_iterator I = IDecl->prop_begin(Context),
+ E = IDecl->prop_end(Context); I != E; ++I) {
+ ObjCPropertyDecl *PDecl = (*I);
+ if (SuperPDecl->getIdentifier() == PDecl->getIdentifier())
+ DiagnosePropertyMismatch(PDecl, SuperPDecl,
+ SDecl->getIdentifier());
+ }
+ }
+}
+
+/// MergeOneProtocolPropertiesIntoClass - This routine goes through the list
+/// of properties declared in a protocol and adds them to the list of
+/// properties for the current class/category if they are not there already.
+void
+Sema::MergeOneProtocolPropertiesIntoClass(Decl *CDecl,
+ ObjCProtocolDecl *PDecl) {
+ ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDecl);
+ if (!IDecl) {
+ // Category
+ ObjCCategoryDecl *CatDecl = static_cast<ObjCCategoryDecl*>(CDecl);
+ assert (CatDecl && "MergeOneProtocolPropertiesIntoClass");
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(Context),
+ E = PDecl->prop_end(Context); P != E; ++P) {
+ ObjCPropertyDecl *Pr = (*P);
+ ObjCCategoryDecl::prop_iterator CP, CE;
+ // Is this property already in category's list of properties?
+ for (CP = CatDecl->prop_begin(Context), CE = CatDecl->prop_end(Context);
+ CP != CE; ++CP)
+ if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ break;
+ if (CP != CE)
+ // The protocol property already exists in the category. Diagnose any mismatch.
+ DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ }
+ return;
+ }
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(Context),
+ E = PDecl->prop_end(Context); P != E; ++P) {
+ ObjCPropertyDecl *Pr = (*P);
+ ObjCInterfaceDecl::prop_iterator CP, CE;
+ // Is this property already in class's list of properties?
+ for (CP = IDecl->prop_begin(Context), CE = IDecl->prop_end(Context);
+ CP != CE; ++CP)
+ if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ break;
+ if (CP != CE)
+ // The protocol property already exists in the class. Diagnose any mismatch.
+ DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ }
+}
+
+/// MergeProtocolPropertiesIntoClass - This routine merges properties
+/// declared in 'MergeItsProtocols' objects (which can be a class or an
+/// inherited protocol) into the list of properties for the class/category
+/// 'CDecl'.
+///
+void Sema::MergeProtocolPropertiesIntoClass(Decl *CDecl,
+ DeclPtrTy MergeItsProtocols) {
+ Decl *ClassDecl = MergeItsProtocols.getAs<Decl>();
+ ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDecl);
+
+ if (!IDecl) {
+ // Category
+ ObjCCategoryDecl *CatDecl = static_cast<ObjCCategoryDecl*>(CDecl);
+ assert (CatDecl && "MergeProtocolPropertiesIntoClass");
+ if (ObjCCategoryDecl *MDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ for (ObjCCategoryDecl::protocol_iterator P = MDecl->protocol_begin(),
+ E = MDecl->protocol_end(); P != E; ++P)
+ // Merge properties of protocol (*P) into CatDecl's.
+ MergeOneProtocolPropertiesIntoClass(CatDecl, *P);
+
+ // Go through the list of protocols for this category and recursively merge
+ // their properties into this category as well.
+ for (ObjCCategoryDecl::protocol_iterator P = CatDecl->protocol_begin(),
+ E = CatDecl->protocol_end(); P != E; ++P)
+ MergeProtocolPropertiesIntoClass(CatDecl, DeclPtrTy::make(*P));
+ } else {
+ ObjCProtocolDecl *MD = cast<ObjCProtocolDecl>(ClassDecl);
+ for (ObjCProtocolDecl::protocol_iterator P = MD->protocol_begin(),
+ E = MD->protocol_end(); P != E; ++P)
+ MergeOneProtocolPropertiesIntoClass(CatDecl, *P);
+ }
+ return;
+ }
+
+ if (ObjCInterfaceDecl *MDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ for (ObjCInterfaceDecl::protocol_iterator P = MDecl->protocol_begin(),
+ E = MDecl->protocol_end(); P != E; ++P)
+ // Merge properties of protocol (*P) into IDecl's.
+ MergeOneProtocolPropertiesIntoClass(IDecl, *P);
+
+ // Go through the list of protocols for this class and recursively merge
+ // their properties into this class as well.
+ for (ObjCInterfaceDecl::protocol_iterator P = IDecl->protocol_begin(),
+ E = IDecl->protocol_end(); P != E; ++P)
+ MergeProtocolPropertiesIntoClass(IDecl, DeclPtrTy::make(*P));
+ } else {
+ ObjCProtocolDecl *MD = cast<ObjCProtocolDecl>(ClassDecl);
+ for (ObjCProtocolDecl::protocol_iterator P = MD->protocol_begin(),
+ E = MD->protocol_end(); P != E; ++P)
+ MergeOneProtocolPropertiesIntoClass(IDecl, *P);
+ }
+}
+
+/// DiagnoseClassExtensionDupMethods - Check for duplicate declaration of
+/// a class method in its extension.
+///
+void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID) {
+ if (!ID)
+ return; // Possibly due to previous error
+
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> MethodMap;
+ for (ObjCInterfaceDecl::method_iterator i = ID->meth_begin(Context),
+ e = ID->meth_end(Context); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ MethodMap[MD->getSelector()] = MD;
+ }
+
+ if (MethodMap.empty())
+ return;
+ for (ObjCCategoryDecl::method_iterator i = CAT->meth_begin(Context),
+ e = CAT->meth_end(Context); i != e; ++i) {
+ ObjCMethodDecl *Method = *i;
+ const ObjCMethodDecl *&PrevMethod = MethodMap[Method->getSelector()];
+ if (PrevMethod && !MatchTwoMethodDeclarations(Method, PrevMethod)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ }
+}
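+
+// Sketch of the conflict diagnosed above (hypothetical class and extension);
+// a redeclaration with a different type for the same selector is rejected:
+//
+//   @interface Foo
+//   - (void)run;
+//   @end
+//   @interface Foo ()
+//   - (int)run;  // err_duplicate_method_decl: conflicting types for 'run'
+//   @end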
+
+/// ActOnForwardProtocolDeclaration - Handle @protocol foo;
+Action::DeclPtrTy
+Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
+ const IdentifierLocPair *IdentList,
+ unsigned NumElts,
+ AttributeList *attrList) {
+ llvm::SmallVector<ObjCProtocolDecl*, 32> Protocols;
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ IdentifierInfo *Ident = IdentList[i].first;
+ ObjCProtocolDecl *PDecl = LookupProtocol(Ident);
+ if (PDecl == 0) { // Not already seen?
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext,
+ IdentList[i].second, Ident);
+ PushOnScopeChains(PDecl, TUScope);
+ }
+ if (attrList)
+ ProcessDeclAttributeList(PDecl, attrList);
+ Protocols.push_back(PDecl);
+ }
+
+ ObjCForwardProtocolDecl *PDecl =
+ ObjCForwardProtocolDecl::Create(Context, CurContext, AtProtocolLoc,
+ &Protocols[0], Protocols.size());
+ CurContext->addDecl(Context, PDecl);
+ CheckObjCDeclScope(PDecl);
+ return DeclPtrTy::make(PDecl);
+}
+
+Sema::DeclPtrTy Sema::
+ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *CategoryName,
+ SourceLocation CategoryLoc,
+ const DeclPtrTy *ProtoRefs,
+ unsigned NumProtoRefs,
+ SourceLocation EndProtoLoc) {
+ ObjCCategoryDecl *CDecl =
+ ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc, CategoryName);
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(Context, CDecl);
+
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName);
+ /// Check that class of this category is already completely declared.
+ if (!IDecl || IDecl->isForwardDecl()) {
+ CDecl->setInvalidDecl();
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ return DeclPtrTy::make(CDecl);
+ }
+
+ CDecl->setClassInterface(IDecl);
+
+ // If the interface is deprecated, warn about it.
+ (void)DiagnoseUseOfDecl(IDecl, ClassLoc);
+
+ /// Check for duplicate interface declaration for this category
+ ObjCCategoryDecl *CDeclChain;
+ for (CDeclChain = IDecl->getCategoryList(); CDeclChain;
+ CDeclChain = CDeclChain->getNextClassCategory()) {
+ if (CategoryName && CDeclChain->getIdentifier() == CategoryName) {
+ Diag(CategoryLoc, diag::warn_dup_category_def)
+ << ClassName << CategoryName;
+ Diag(CDeclChain->getLocation(), diag::note_previous_definition);
+ break;
+ }
+ }
+ if (!CDeclChain)
+ CDecl->insertNextClassCategory();
+
+ if (NumProtoRefs) {
+ CDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,Context);
+ CDecl->setLocEnd(EndProtoLoc);
+ }
+
+ CheckObjCDeclScope(CDecl);
+ return DeclPtrTy::make(CDecl);
+}
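+
+// For example (hypothetical declarations), redeclaring a named category is
+// flagged by the duplicate check above:
+//
+//   @interface Foo @end
+//   @interface Foo (Extras) @end
+//   @interface Foo (Extras) @end  // warn_dup_category_def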
+
+/// ActOnStartCategoryImplementation - Perform semantic checks on the
+/// category implementation declaration and build an ObjCCategoryImplDecl
+/// object.
+Sema::DeclPtrTy Sema::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *CatName, SourceLocation CatLoc) {
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName);
+ ObjCCategoryImplDecl *CDecl =
+ ObjCCategoryImplDecl::Create(Context, CurContext, AtCatImplLoc, CatName,
+ IDecl);
+ /// Check that class of this category is already completely declared.
+ if (!IDecl || IDecl->isForwardDecl())
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(Context, CDecl);
+
+ /// TODO: Check that CatName, category name, is not used in another
+ // implementation.
+ ObjCCategoryImpls.push_back(CDecl);
+
+ CheckObjCDeclScope(CDecl);
+ return DeclPtrTy::make(CDecl);
+}
+
+Sema::DeclPtrTy Sema::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc) {
+ ObjCInterfaceDecl* IDecl = 0;
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl = LookupName(TUScope, ClassName, LookupOrdinaryName);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+ // Is there an interface declaration of this class; if not, warn!
+ IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (!IDecl || IDecl->isForwardDecl()) {
+ Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
+ IDecl = 0;
+ }
+ }
+
+ // Check that the super class name is a valid class name.
+ ObjCInterfaceDecl* SDecl = 0;
+ if (SuperClassname) {
+ // Check if a different kind of symbol is declared in this scope.
+ PrevDecl = LookupName(TUScope, SuperClassname, LookupOrdinaryName);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(SuperClassLoc, diag::err_redefinition_different_kind)
+ << SuperClassname;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+ SDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (!SDecl)
+ Diag(SuperClassLoc, diag::err_undef_superclass)
+ << SuperClassname << ClassName;
+ else if (IDecl && IDecl->getSuperClass() != SDecl) {
+ // This implementation and its interface do not have the same
+ // super class.
+ Diag(SuperClassLoc, diag::err_conflicting_super_class)
+ << SDecl->getDeclName();
+ Diag(SDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+ }
+
+ if (!IDecl) {
+ // Legacy case of @implementation with no corresponding @interface.
+ // Build, chain & install the interface decl into the identifier.
+
+ // FIXME: Do we support attributes on the @implementation? If so we should
+ // copy them over.
+ IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
+ ClassName, ClassLoc, false, true);
+ IDecl->setSuperClass(SDecl);
+ IDecl->setLocEnd(ClassLoc);
+
+ PushOnScopeChains(IDecl, TUScope);
+ } else {
+ // Mark the interface as being completed, even if it was just a
+ // @class ....;
+ // declaration; the user cannot reopen it.
+ IDecl->setForwardDecl(false);
+ }
+
+ ObjCImplementationDecl* IMPDecl =
+ ObjCImplementationDecl::Create(Context, CurContext, AtClassImplLoc,
+ IDecl, SDecl);
+
+ if (CheckObjCDeclScope(IMPDecl))
+ return DeclPtrTy::make(IMPDecl);
+
+ // Check that there is no duplicate implementation of this class.
+ if (LookupObjCImplementation(ClassName))
+ // FIXME: Don't leak everything!
+ Diag(ClassLoc, diag::err_dup_implementation_class) << ClassName;
+ else // add it to the list.
+ PushOnScopeChains(IMPDecl, TUScope);
+ return DeclPtrTy::make(IMPDecl);
+}
+
+void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **ivars, unsigned numIvars,
+ SourceLocation RBrace) {
+ assert(ImpDecl && "missing implementation decl");
+ ObjCInterfaceDecl* IDecl = ImpDecl->getClassInterface();
+ if (!IDecl)
+ return;
+ /// Check the case of a non-existent @interface decl
+ /// (a legacy Objective-C @implementation decl without an @interface decl).
+ /// Add the implementation's ivars to the synthesized class's ivar list.
+ if (IDecl->isImplicitInterfaceDecl()) {
+ IDecl->setIVarList(ivars, numIvars, Context);
+ IDecl->setLocEnd(RBrace);
+ return;
+ }
+ // If implementation has empty ivar list, just return.
+ if (numIvars == 0)
+ return;
+
+ assert(ivars && "missing @implementation ivars");
+
+ // Check the interface's ivar list against the implementation's;
+ // names and types must match.
+ //
+ unsigned j = 0;
+ ObjCInterfaceDecl::ivar_iterator
+ IVI = IDecl->ivar_begin(), IVE = IDecl->ivar_end();
+ for (; numIvars > 0 && IVI != IVE; ++IVI) {
+ ObjCIvarDecl* ImplIvar = ivars[j++];
+ ObjCIvarDecl* ClsIvar = *IVI;
+ assert (ImplIvar && "missing implementation ivar");
+ assert (ClsIvar && "missing class ivar");
+
+ // First, make sure the types match.
+ if (Context.getCanonicalType(ImplIvar->getType()) !=
+ Context.getCanonicalType(ClsIvar->getType())) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_type)
+ << ImplIvar->getIdentifier()
+ << ImplIvar->getType() << ClsIvar->getType();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ } else if (ImplIvar->isBitField() && ClsIvar->isBitField()) {
+ Expr *ImplBitWidth = ImplIvar->getBitWidth();
+ Expr *ClsBitWidth = ClsIvar->getBitWidth();
+ if (ImplBitWidth->EvaluateAsInt(Context).getZExtValue() !=
+ ClsBitWidth->EvaluateAsInt(Context).getZExtValue()) {
+ Diag(ImplBitWidth->getLocStart(), diag::err_conflicting_ivar_bitwidth)
+ << ImplIvar->getIdentifier();
+ Diag(ClsBitWidth->getLocStart(), diag::note_previous_definition);
+ }
+ }
+ // Make sure the names are identical.
+ if (ImplIvar->getIdentifier() != ClsIvar->getIdentifier()) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_name)
+ << ImplIvar->getIdentifier() << ClsIvar->getIdentifier();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ }
+ --numIvars;
+ }
+
+ if (numIvars > 0)
+ Diag(ivars[j]->getLocation(), diag::err_inconsistant_ivar_count);
+ else if (IVI != IVE)
+ Diag((*IVI)->getLocation(), diag::err_inconsistant_ivar_count);
+}
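+
+// A small sketch of the checks above (hypothetical class): ivars repeated in a
+// legacy @implementation must match the @interface in name and type.
+//
+//   @interface Foo { int x; } @end
+//   @implementation Foo { float x; }  // err_conflicting_ivar_type for 'x'
+//   @end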
+
+void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
+ bool &IncompleteImpl) {
+ if (!IncompleteImpl) {
+ Diag(ImpLoc, diag::warn_incomplete_impl);
+ IncompleteImpl = true;
+ }
+ Diag(ImpLoc, diag::warn_undef_method_impl) << method->getDeclName();
+}
+
+void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *IntfMethodDecl) {
+ if (!Context.typesAreCompatible(IntfMethodDecl->getResultType(),
+ ImpMethodDecl->getResultType()) &&
+ !QualifiedIdConformsQualifiedId(IntfMethodDecl->getResultType(),
+ ImpMethodDecl->getResultType())) {
+ Diag(ImpMethodDecl->getLocation(), diag::warn_conflicting_ret_types)
+ << ImpMethodDecl->getDeclName() << IntfMethodDecl->getResultType()
+ << ImpMethodDecl->getResultType();
+ Diag(IntfMethodDecl->getLocation(), diag::note_previous_definition);
+ }
+
+ for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
+ IF = IntfMethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
+ IM != EM; ++IM, ++IF) {
+ if (Context.typesAreCompatible((*IF)->getType(), (*IM)->getType()) ||
+ QualifiedIdConformsQualifiedId((*IF)->getType(), (*IM)->getType()))
+ continue;
+
+ Diag((*IM)->getLocation(), diag::warn_conflicting_param_types)
+ << ImpMethodDecl->getDeclName() << (*IF)->getType()
+ << (*IM)->getType();
+ Diag((*IF)->getLocation(), diag::note_previous_definition);
+ }
+}
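+
+// For instance (hypothetical code), a return type that differs between the
+// interface and its implementation is warned about above:
+//
+//   @interface Foo
+//   - (float)value;
+//   @end
+//   @implementation Foo
+//   - (int)value { return 0; }  // warn_conflicting_ret_types
+//   @end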
+
+/// isPropertyReadonly - Return true if property is readonly, by searching
+/// for the property in the class and in its categories and implementations
+///
+bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl,
+ ObjCInterfaceDecl *IDecl) {
+ // by far the most common case.
+ if (!PDecl->isReadOnly())
+ return false;
+ // Even if the property is read-only, if the interface has a user-defined
+ // setter, it is not considered read-only.
+ if (IDecl->getInstanceMethod(Context, PDecl->getSetterName()))
+ return false;
+
+ // The main class has the property as 'readonly'. Must search
+ // through the category list to see if the property's
+ // attribute has been overridden to 'readwrite'.
+ for (ObjCCategoryDecl *Category = IDecl->getCategoryList();
+ Category; Category = Category->getNextClassCategory()) {
+ // Even if the property is read-only, if a category has a user-defined
+ // setter, it is not considered read-only.
+ if (Category->getInstanceMethod(Context, PDecl->getSetterName()))
+ return false;
+ ObjCPropertyDecl *P =
+ Category->FindPropertyDeclaration(Context, PDecl->getIdentifier());
+ if (P && !P->isReadOnly())
+ return false;
+ }
+
+ // Also, check for definition of a setter method in the implementation if
+ // all else failed.
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(CurContext)) {
+ if (ObjCImplementationDecl *IMD =
+ dyn_cast<ObjCImplementationDecl>(OMD->getDeclContext())) {
+ if (IMD->getInstanceMethod(Context, PDecl->getSetterName()))
+ return false;
+ }
+ else if (ObjCCategoryImplDecl *CIMD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ if (CIMD->getInstanceMethod(Context, PDecl->getSetterName()))
+ return false;
+ }
+ }
+ // Lastly, look through the implementation (if one is in scope).
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(IDecl->getIdentifier()))
+ if (ImpDecl->getInstanceMethod(Context, PDecl->getSetterName()))
+ return false;
+ // If all else fails, look at the super class.
+ if (ObjCInterfaceDecl *SIDecl = IDecl->getSuperClass())
+ return isPropertyReadonly(PDecl, SIDecl);
+ return true;
+}
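+
+// A sketch of the category case handled above (hypothetical declarations): the
+// property is not treated as read-only because a category redeclares it.
+//
+//   @interface Foo
+//   @property (readonly) int count;
+//   @end
+//   @interface Foo (Writable)
+//   @property (readwrite) int count;  // makes 'count' assignable
+//   @end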
+
+/// FIXME: Type hierarchies in Objective-C can be deep. We could most likely
+/// improve the efficiency of selector lookups and type checking by associating
+/// with each protocol / interface / category the flattened instance tables. If
+/// we used an immutable set to keep the table then it wouldn't add significant
+/// memory cost and it would be handy for lookups.
+
+/// CheckProtocolMethodDefs - This routine checks for unimplemented methods
+/// declared in the protocol, and in the protocols it references.
+void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
+ ObjCProtocolDecl *PDecl,
+ bool& IncompleteImpl,
+ const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ ObjCInterfaceDecl *IDecl) {
+ ObjCInterfaceDecl *Super = IDecl->getSuperClass();
+ ObjCInterfaceDecl *NSIDecl = 0;
+ if (getLangOptions().NeXTRuntime) {
+ // Check to see if the class implements the forwardInvocation method and
+ // objects of this class are derived from 'NSProxy', so that it can forward
+ // requests from one object to another.
+ // Under such conditions, which mean that effectively every possible method
+ // is implemented in the class, we should not issue "Method definition not
+ // found" warnings.
+ // FIXME: Use a general GetUnarySelector method for this.
+ IdentifierInfo* II = &Context.Idents.get("forwardInvocation");
+ Selector fISelector = Context.Selectors.getSelector(1, &II);
+ if (InsMap.count(fISelector))
+ // Is IDecl derived from 'NSProxy'? If so, no instance methods
+ // need be implemented in the implementation.
+ NSIDecl = IDecl->lookupInheritedClass(&Context.Idents.get("NSProxy"));
+ }
+
+ // If a method lookup fails locally we still need to look and see if
+ // the method was implemented by a base class or an inherited
+ // protocol. This lookup is slow, but occurs rarely in correct code
+ // and otherwise would terminate in a warning.
+
+ // check unimplemented instance methods.
+ if (!NSIDecl)
+ for (ObjCProtocolDecl::instmeth_iterator I = PDecl->instmeth_begin(Context),
+ E = PDecl->instmeth_end(Context); I != E; ++I) {
+ ObjCMethodDecl *method = *I;
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !method->isSynthesized() && !InsMap.count(method->getSelector()) &&
+ (!Super ||
+ !Super->lookupInstanceMethod(Context, method->getSelector()))) {
+ // Ugly, but necessary. A method declared in the protocol might have
+ // been synthesized due to a property declared in the class which
+ // uses the protocol.
+ ObjCMethodDecl *MethodInClass =
+ IDecl->lookupInstanceMethod(Context, method->getSelector());
+ if (!MethodInClass || !MethodInClass->isSynthesized())
+ WarnUndefinedMethod(ImpLoc, method, IncompleteImpl);
+ }
+ }
+ // check unimplemented class methods
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(Context),
+ E = PDecl->classmeth_end(Context);
+ I != E; ++I) {
+ ObjCMethodDecl *method = *I;
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !ClsMap.count(method->getSelector()) &&
+ (!Super || !Super->lookupClassMethod(Context, method->getSelector())))
+ WarnUndefinedMethod(ImpLoc, method, IncompleteImpl);
+ }
+ // Check this protocol's referenced protocols, recursively.
+ for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, IDecl);
+}
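+
+// Sketch of the incomplete-implementation warning this produces (hypothetical
+// protocol and class):
+//
+//   @protocol Worker
+//   - (void)run;              // required by default
+//   @end
+//   @interface Foo <Worker> @end
+//   @implementation Foo       // warn_undef_method_impl: 'run' not implemented
+//   @end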
+
+/// MatchAllMethodDeclarations - Check methods declared in an interface
+/// or protocol against those declared in their implementations.
+///
+void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ llvm::DenseSet<Selector> &InsMapSeen,
+ llvm::DenseSet<Selector> &ClsMapSeen,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool &IncompleteImpl,
+ bool ImmediateClass)
+{
+ // Check and see if instance methods in the class interface have been
+ // implemented in the implementation class. If so, check that their types match.
+ for (ObjCInterfaceDecl::instmeth_iterator I = CDecl->instmeth_begin(Context),
+ E = CDecl->instmeth_end(Context); I != E; ++I) {
+ if (InsMapSeen.count((*I)->getSelector()))
+ continue;
+ InsMapSeen.insert((*I)->getSelector());
+ if (!(*I)->isSynthesized() &&
+ !InsMap.count((*I)->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl);
+ continue;
+ }
+ else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getInstanceMethod(Context, (*I)->getSelector());
+ ObjCMethodDecl *IntfMethodDecl =
+ CDecl->getInstanceMethod(Context, (*I)->getSelector());
+ assert(IntfMethodDecl &&
+ "IntfMethodDecl is null in ImplMethodsVsClassMethods");
+ // ImpMethodDecl may be null as in a @dynamic property.
+ if (ImpMethodDecl)
+ WarnConflictingTypedMethods(ImpMethodDecl, IntfMethodDecl);
+ }
+ }
+
+ // Check and see if class methods in the class interface have been
+ // implemented in the implementation class. If so, check that their types match.
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = CDecl->classmeth_begin(Context),
+ E = CDecl->classmeth_end(Context);
+ I != E; ++I) {
+ if (ClsMapSeen.count((*I)->getSelector()))
+ continue;
+ ClsMapSeen.insert((*I)->getSelector());
+ if (!ClsMap.count((*I)->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl);
+ }
+ else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getClassMethod(Context, (*I)->getSelector());
+ ObjCMethodDecl *IntfMethodDecl =
+ CDecl->getClassMethod(Context, (*I)->getSelector());
+ WarnConflictingTypedMethods(ImpMethodDecl, IntfMethodDecl);
+ }
+ }
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ // Check for any implementation of methods declared in protocols.
+ for (ObjCInterfaceDecl::protocol_iterator PI = I->protocol_begin(),
+ E = I->protocol_end(); PI != E; ++PI)
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ (*PI), IncompleteImpl, false);
+ if (I->getSuperClass())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ I->getSuperClass(), IncompleteImpl, false);
+ }
+}
+
+void Sema::ImplMethodsVsClassMethods(ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool IncompleteImpl) {
+ llvm::DenseSet<Selector> InsMap;
+ // Check and see if instance methods in class interface have been
+ // implemented in the implementation class.
+ for (ObjCImplementationDecl::instmeth_iterator
+ I = IMPDecl->instmeth_begin(Context),
+ E = IMPDecl->instmeth_end(Context); I != E; ++I)
+ InsMap.insert((*I)->getSelector());
+
+ // Check and see if properties declared in the interface have either 1) an
+ // implementation or 2) a @synthesize/@dynamic implementation of the
+ // property in the @implementation.
+ if (isa<ObjCInterfaceDecl>(CDecl))
+ for (ObjCContainerDecl::prop_iterator P = CDecl->prop_begin(Context),
+ E = CDecl->prop_end(Context); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ if (Prop->isInvalidDecl())
+ continue;
+ ObjCPropertyImplDecl *PI = 0;
+ // Is there a matching property synthesize/dynamic?
+ for (ObjCImplDecl::propimpl_iterator
+ I = IMPDecl->propimpl_begin(Context),
+ EI = IMPDecl->propimpl_end(Context); I != EI; ++I)
+ if ((*I)->getPropertyDecl() == Prop) {
+ PI = (*I);
+ break;
+ }
+ if (PI)
+ continue;
+ if (!InsMap.count(Prop->getGetterName())) {
+ Diag(Prop->getLocation(),
+ diag::warn_setter_getter_impl_required)
+ << Prop->getDeclName() << Prop->getGetterName();
+ Diag(IMPDecl->getLocation(),
+ diag::note_property_impl_required);
+ }
+
+ if (!Prop->isReadOnly() && !InsMap.count(Prop->getSetterName())) {
+ Diag(Prop->getLocation(),
+ diag::warn_setter_getter_impl_required)
+ << Prop->getDeclName() << Prop->getSetterName();
+ Diag(IMPDecl->getLocation(),
+ diag::note_property_impl_required);
+ }
+ }
+
+ llvm::DenseSet<Selector> ClsMap;
+ for (ObjCImplementationDecl::classmeth_iterator
+ I = IMPDecl->classmeth_begin(Context),
+ E = IMPDecl->classmeth_end(Context); I != E; ++I)
+ ClsMap.insert((*I)->getSelector());
+
+ // Check for type conflicts between methods declared in a class/protocol
+ // and its implementation, if any.
+ llvm::DenseSet<Selector> InsMapSeen, ClsMapSeen;
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, CDecl,
+ IncompleteImpl, true);
+
+ // Check the protocol list for unimplemented methods in the @implementation
+ // class.
+ // Check and see if class methods in class interface have been
+ // implemented in the implementation class.
+
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ for (ObjCInterfaceDecl::protocol_iterator PI = I->protocol_begin(),
+ E = I->protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
+ InsMap, ClsMap, I);
+ // Check class extensions (unnamed categories)
+ for (ObjCCategoryDecl *Categories = I->getCategoryList();
+ Categories; Categories = Categories->getNextClassCategory()) {
+ if (!Categories->getIdentifier()) {
+ ImplMethodsVsClassMethods(IMPDecl, Categories, IncompleteImpl);
+ break;
+ }
+ }
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ for (ObjCCategoryDecl::protocol_iterator PI = C->protocol_begin(),
+ E = C->protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
+ InsMap, ClsMap, C->getClassInterface());
+ } else
+ assert(false && "invalid ObjCContainerDecl type.");
+}
+
+/// ActOnForwardClassDeclaration -
+Action::DeclPtrTy
+Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
+ IdentifierInfo **IdentList,
+ unsigned NumElts) {
+ llvm::SmallVector<ObjCInterfaceDecl*, 32> Interfaces;
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl = LookupName(TUScope, IdentList[i], LookupOrdinaryName);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(AtClassLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ // GCC apparently allows the following idiom:
+ //
+ // typedef NSObject < XCElementTogglerP > XCElementToggler;
+ // @class XCElementToggler;
+ //
+ // FIXME: Make an extension?
+ TypedefDecl *TDD = dyn_cast<TypedefDecl>(PrevDecl);
+ if (!TDD || !isa<ObjCInterfaceType>(TDD->getUnderlyingType())) {
+ Diag(AtClassLoc, diag::err_redefinition_different_kind) << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+ else if (TDD) {
+ // a forward class declaration matching a typedef name of a class
+ // refers to the underlying class.
+ if (ObjCInterfaceType * OI =
+ dyn_cast<ObjCInterfaceType>(TDD->getUnderlyingType()))
+ PrevDecl = OI->getDecl();
+ }
+ }
+ ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (!IDecl) { // Not already seen? Make a forward decl.
+ IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
+ IdentList[i], SourceLocation(), true);
+ PushOnScopeChains(IDecl, TUScope);
+ }
+
+ Interfaces.push_back(IDecl);
+ }
+
+ ObjCClassDecl *CDecl = ObjCClassDecl::Create(Context, CurContext, AtClassLoc,
+ &Interfaces[0],
+ Interfaces.size());
+ CurContext->addDecl(Context, CDecl);
+ CheckObjCDeclScope(CDecl);
+ return DeclPtrTy::make(CDecl);
+}
+
+
+/// MatchTwoMethodDeclarations - Checks whether two methods have matching types
+/// and returns true or false accordingly.
+/// TODO: Handle protocol lists, such as id<p1,p2>, in type comparisons.
+bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
+ const ObjCMethodDecl *PrevMethod,
+ bool matchBasedOnSizeAndAlignment) {
+ QualType T1 = Context.getCanonicalType(Method->getResultType());
+ QualType T2 = Context.getCanonicalType(PrevMethod->getResultType());
+
+ if (T1 != T2) {
+ // The result types are different.
+ if (!matchBasedOnSizeAndAlignment)
+ return false;
+ // Incomplete types don't have a size and alignment.
+ if (T1->isIncompleteType() || T2->isIncompleteType())
+ return false;
+ // Check is based on size and alignment.
+ if (Context.getTypeInfo(T1) != Context.getTypeInfo(T2))
+ return false;
+ }
+
+ ObjCMethodDecl::param_iterator ParamI = Method->param_begin(),
+ E = Method->param_end();
+ ObjCMethodDecl::param_iterator PrevI = PrevMethod->param_begin();
+
+ for (; ParamI != E; ++ParamI, ++PrevI) {
+ assert(PrevI != PrevMethod->param_end() && "Param mismatch");
+ T1 = Context.getCanonicalType((*ParamI)->getType());
+ T2 = Context.getCanonicalType((*PrevI)->getType());
+ if (T1 != T2) {
+ // The parameter types are different.
+ if (!matchBasedOnSizeAndAlignment)
+ return false;
+ // Incomplete types don't have a size and alignment.
+ if (T1->isIncompleteType() || T2->isIncompleteType())
+ return false;
+ // Check is based on size and alignment.
+ if (Context.getTypeInfo(T1) != Context.getTypeInfo(T2))
+ return false;
+ }
+ }
+ return true;
+}
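+
+// Note (informal): with matchBasedOnSizeAndAlignment set, declarations such as
+//   - (int)tag;   and   - (unsigned int)tag;
+// are treated as matching, since the types differ only in signedness and have
+// identical size and alignment; an exact type match is not required.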
+
+/// \brief Read the contents of the instance and factory method pools
+/// for a given selector from external storage.
+///
+/// This routine should only be called once, when neither the instance
+/// nor the factory method pool has an entry for this selector.
+Sema::MethodPool::iterator Sema::ReadMethodPool(Selector Sel,
+ bool isInstance) {
+ assert(ExternalSource && "We need an external AST source");
+ assert(InstanceMethodPool.find(Sel) == InstanceMethodPool.end() &&
+ "Selector data already loaded into the instance method pool");
+ assert(FactoryMethodPool.find(Sel) == FactoryMethodPool.end() &&
+ "Selector data already loaded into the factory method pool");
+
+ // Read the method list from the external source.
+ std::pair<ObjCMethodList, ObjCMethodList> Methods
+ = ExternalSource->ReadMethodPool(Sel);
+
+ if (isInstance) {
+ if (Methods.second.Method)
+ FactoryMethodPool[Sel] = Methods.second;
+ return InstanceMethodPool.insert(std::make_pair(Sel, Methods.first)).first;
+ }
+
+ if (Methods.first.Method)
+ InstanceMethodPool[Sel] = Methods.first;
+
+ return FactoryMethodPool.insert(std::make_pair(Sel, Methods.second)).first;
+}
+
+void Sema::AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method) {
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Pos
+ = InstanceMethodPool.find(Method->getSelector());
+ if (Pos == InstanceMethodPool.end()) {
+ if (ExternalSource && !FactoryMethodPool.count(Method->getSelector()))
+ Pos = ReadMethodPool(Method->getSelector(), /*isInstance=*/true);
+ else
+ Pos = InstanceMethodPool.insert(std::make_pair(Method->getSelector(),
+ ObjCMethodList())).first;
+ }
+
+ ObjCMethodList &Entry = Pos->second;
+ if (Entry.Method == 0) {
+ // Haven't seen a method with this selector name yet - add it.
+ Entry.Method = Method;
+ Entry.Next = 0;
+ return;
+ }
+
+ // We've seen a method with this name, see if we have already seen this type
+ // signature.
+ for (ObjCMethodList *List = &Entry; List; List = List->Next)
+ if (MatchTwoMethodDeclarations(Method, List->Method))
+ return;
+
+ // We have a new signature for an existing method - add it.
+ // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
+ Entry.Next = new ObjCMethodList(Method, Entry.Next);
+}
+
+// FIXME: Finish implementing -Wno-strict-selector-match.
+ObjCMethodDecl *Sema::LookupInstanceMethodInGlobalPool(Selector Sel,
+ SourceRange R) {
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Pos
+ = InstanceMethodPool.find(Sel);
+ if (Pos == InstanceMethodPool.end()) {
+ if (ExternalSource && !FactoryMethodPool.count(Sel))
+ Pos = ReadMethodPool(Sel, /*isInstance=*/true);
+ else
+ return 0;
+ }
+
+ ObjCMethodList &MethList = Pos->second;
+ bool issueWarning = false;
+
+ if (MethList.Method && MethList.Next) {
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
+ // This checks if the methods differ by size & alignment.
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method, true))
+ issueWarning = true;
+ }
+ if (issueWarning && (MethList.Method && MethList.Next)) {
+ Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
+ Diag(MethList.Method->getLocStart(), diag::note_using_decl)
+ << MethList.Method->getSourceRange();
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
+ Diag(Next->Method->getLocStart(), diag::note_also_found_decl)
+ << Next->Method->getSourceRange();
+ }
+ return MethList.Method;
+}
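+
+// The multiple-declaration warning above is aimed at code like the following
+// (all names hypothetical), where a message to 'id' is ambiguous between
+// declarations of the same selector with different layouts:
+//
+//   @interface A
+//   - (int)size;
+//   @end
+//   @interface B
+//   - (double)size;
+//   @end
+//   void use(id obj) {
+//     [obj size];  // warn_multiple_method_decl: multiple declarations of 'size'
+//   }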
+
+void Sema::AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method) {
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Pos
+ = FactoryMethodPool.find(Method->getSelector());
+ if (Pos == FactoryMethodPool.end()) {
+ if (ExternalSource && !InstanceMethodPool.count(Method->getSelector()))
+ Pos = ReadMethodPool(Method->getSelector(), /*isInstance=*/false);
+ else
+ Pos = FactoryMethodPool.insert(std::make_pair(Method->getSelector(),
+ ObjCMethodList())).first;
+ }
+
+ ObjCMethodList &FirstMethod = Pos->second;
+ if (!FirstMethod.Method) {
+ // Haven't seen a method with this selector name yet - add it.
+ FirstMethod.Method = Method;
+ FirstMethod.Next = 0;
+ } else {
+ // We've seen a method with this name, now check the type signature(s).
+ bool match = MatchTwoMethodDeclarations(Method, FirstMethod.Method);
+
+ for (ObjCMethodList *Next = FirstMethod.Next; !match && Next;
+ Next = Next->Next)
+ match = MatchTwoMethodDeclarations(Method, Next->Method);
+
+ if (!match) {
+ // We have a new signature for an existing method - add it.
+ // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
+ struct ObjCMethodList *OMI = new ObjCMethodList(Method, FirstMethod.Next);
+ FirstMethod.Next = OMI;
+ }
+ }
+}
+
+ObjCMethodDecl *Sema::LookupFactoryMethodInGlobalPool(Selector Sel,
+ SourceRange R) {
+ llvm::DenseMap<Selector, ObjCMethodList>::iterator Pos
+ = FactoryMethodPool.find(Sel);
+ if (Pos == FactoryMethodPool.end()) {
+ if (ExternalSource && !InstanceMethodPool.count(Sel))
+ Pos = ReadMethodPool(Sel, /*isInstance=*/false);
+ else
+ return 0;
+ }
+
+ ObjCMethodList &MethList = Pos->second;
+ bool issueWarning = false;
+
+ if (MethList.Method && MethList.Next) {
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
+ // This checks if the methods differ by size & alignment.
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method, true))
+ issueWarning = true;
+ }
+ if (issueWarning && (MethList.Method && MethList.Next)) {
+ Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
+ Diag(MethList.Method->getLocStart(), diag::note_using_decl)
+ << MethList.Method->getSourceRange();
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
+ Diag(Next->Method->getLocStart(), diag::note_also_found_decl)
+ << Next->Method->getSourceRange();
+ }
+ return MethList.Method;
+}
+
+/// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods
+/// have the property type and issue diagnostics if they don't.
+/// Also synthesize a getter/setter method if none exists (and update the
+/// appropriate lookup tables). FIXME: Should reconsider if adding synthesized
+/// methods is the "right" thing to do.
+void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
+ ObjCContainerDecl *CD) {
+ ObjCMethodDecl *GetterMethod, *SetterMethod;
+
+ GetterMethod = CD->getInstanceMethod(Context, property->getGetterName());
+ SetterMethod = CD->getInstanceMethod(Context, property->getSetterName());
+ DiagnosePropertyAccessorMismatch(property, GetterMethod,
+ property->getLocation());
+
+ if (SetterMethod) {
+ if (Context.getCanonicalType(SetterMethod->getResultType())
+ != Context.VoidTy)
+ Diag(SetterMethod->getLocation(), diag::err_setter_type_void);
+ if (SetterMethod->param_size() != 1 ||
+ ((*SetterMethod->param_begin())->getType() != property->getType())) {
+ Diag(property->getLocation(),
+ diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << SetterMethod->getSelector();
+ Diag(SetterMethod->getLocation(), diag::note_declared_at);
+ }
+ }
+
+ // Synthesize getter/setter methods if none exist.
+ // Find the default getter and, if one is not found, add one.
+ // FIXME: The synthesized property we set here is misleading. We almost always
+ // synthesize these methods unless the user explicitly provided prototypes
+ // (which is odd, but allowed). Sema should be typechecking that the
+ // declarations agree in that situation (which it currently does not).
+ if (!GetterMethod) {
+ // No instance method of same name as property getter name was found.
+ // Declare a getter method and add it to the list of methods
+ // for this class.
+ GetterMethod = ObjCMethodDecl::Create(Context, property->getLocation(),
+ property->getLocation(), property->getGetterName(),
+ property->getType(), CD, true, false, true,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+ CD->addDecl(Context, GetterMethod);
+ } else
+ // A user-declared getter will be marked as synthesized when @synthesize of
+ // the property with the same name is seen in the @implementation.
+ GetterMethod->setSynthesized(true);
+ property->setGetterMethodDecl(GetterMethod);
+
+ // Skip setter if property is read-only.
+ if (!property->isReadOnly()) {
+ // Find the default setter and, if one is not found, add one.
+ if (!SetterMethod) {
+ // No instance method of same name as property setter name was found.
+ // Declare a setter method and add it to the list of methods
+ // for this class.
+ SetterMethod = ObjCMethodDecl::Create(Context, property->getLocation(),
+ property->getLocation(),
+ property->getSetterName(),
+ Context.VoidTy, CD, true, false, true,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+ // Invent the arguments for the setter. We don't bother making a
+ // nice name for the argument.
+ ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod,
+ property->getLocation(),
+ property->getIdentifier(),
+ property->getType(),
+ VarDecl::None,
+ 0);
+ SetterMethod->setMethodParams(Context, &Argument, 1);
+ CD->addDecl(Context, SetterMethod);
+ } else
+ // A user-declared setter will be marked as synthesized when @synthesize of
+ // the property with the same name is seen in the @implementation.
+ SetterMethod->setSynthesized(true);
+ property->setSetterMethodDecl(SetterMethod);
+ }
+ // Add any synthesized methods to the global pool. This allows us to
+ // handle the following, which is supported by GCC (and part of the design).
+ //
+ // @interface Foo
+ // @property double bar;
+ // @end
+ //
+ // void thisIsUnfortunate() {
+ // id foo;
+ // double bar = [foo bar];
+ // }
+ //
+ if (GetterMethod)
+ AddInstanceMethodToGlobalPool(GetterMethod);
+ if (SetterMethod)
+ AddInstanceMethodToGlobalPool(SetterMethod);
+}
+
+// Note: For class/category implementations, allMethods/allProperties are
+// always null.
+void Sema::ActOnAtEnd(SourceLocation AtEndLoc, DeclPtrTy classDecl,
+ DeclPtrTy *allMethods, unsigned allNum,
+ DeclPtrTy *allProperties, unsigned pNum,
+ DeclGroupPtrTy *allTUVars, unsigned tuvNum) {
+ Decl *ClassDecl = classDecl.getAs<Decl>();
+
+ // FIXME: If we don't have a ClassDecl, we have an error. We should consider
+ // always passing in a decl. If the decl has an error, isInvalidDecl()
+ // should be true.
+ if (!ClassDecl)
+ return;
+
+ bool isInterfaceDeclKind =
+ isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCCategoryDecl>(ClassDecl)
+ || isa<ObjCProtocolDecl>(ClassDecl);
+ bool checkIdenticalMethods = isa<ObjCImplementationDecl>(ClassDecl);
+
+ DeclContext *DC = dyn_cast<DeclContext>(ClassDecl);
+
+ // FIXME: Remove these and use the ObjCContainerDecl/DeclContext.
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap;
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap;
+
+ for (unsigned i = 0; i < allNum; i++ ) {
+ ObjCMethodDecl *Method =
+ cast_or_null<ObjCMethodDecl>(allMethods[i].getAs<Decl>());
+
+ if (!Method) continue; // Already issued a diagnostic.
+ if (Method->isInstanceMethod()) {
+ /// Check for instance method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = InsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ } else {
+ DC->addDecl(Context, Method);
+ InsMap[Method->getSelector()] = Method;
+ /// The following allows us to typecheck messages to "id".
+ AddInstanceMethodToGlobalPool(Method);
+ }
+ }
+ else {
+ /// Check for class method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = ClsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ } else {
+ DC->addDecl(Context, Method);
+ ClsMap[Method->getSelector()] = Method;
+ /// The following allows us to typecheck messages to "Class".
+ AddFactoryMethodToGlobalPool(Method);
+ }
+ }
+ }
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ // Compares properties declared in this class to those of its
+ // super class.
+ ComparePropertiesInBaseAndSuper(I);
+ MergeProtocolPropertiesIntoClass(I, DeclPtrTy::make(I));
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ // Categories are used to extend the class by declaring new methods.
+ // By the same token, they are also used to add new properties. No
+ // need to compare the added property to those in the class.
+
+ // Merge protocol properties into category
+ MergeProtocolPropertiesIntoClass(C, DeclPtrTy::make(C));
+ if (C->getIdentifier() == 0)
+ DiagnoseClassExtensionDupMethods(C, C->getClassInterface());
+ }
+ if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
+ // ProcessPropertyDecl is responsible for diagnosing conflicts with any
+ // user-defined setter/getter. It also synthesizes setter/getter methods
+ // and adds them to the DeclContext and global method pools.
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(Context),
+ E = CDecl->prop_end(Context);
+ I != E; ++I)
+ ProcessPropertyDecl(*I, CDecl);
+ CDecl->setAtEndLoc(AtEndLoc);
+ }
+ if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
+ IC->setLocEnd(AtEndLoc);
+ if (ObjCInterfaceDecl* IDecl = IC->getClassInterface())
+ ImplMethodsVsClassMethods(IC, IDecl);
+ } else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) {
+ CatImplClass->setLocEnd(AtEndLoc);
+
+ // Find category interface decl and then check that all methods declared
+ // in this interface are implemented in the category @implementation.
+ if (ObjCInterfaceDecl* IDecl = CatImplClass->getClassInterface()) {
+ for (ObjCCategoryDecl *Categories = IDecl->getCategoryList();
+ Categories; Categories = Categories->getNextClassCategory()) {
+ if (Categories->getIdentifier() == CatImplClass->getIdentifier()) {
+ ImplMethodsVsClassMethods(CatImplClass, Categories);
+ break;
+ }
+ }
+ }
+ }
+ if (isInterfaceDeclKind) {
+ // Reject invalid vardecls.
+ for (unsigned i = 0; i != tuvNum; i++) {
+ DeclGroupRef DG = allTUVars[i].getAsVal<DeclGroupRef>();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ if (VarDecl *VDecl = dyn_cast<VarDecl>(*I)) {
+ if (!VDecl->hasExternalStorage())
+ Diag(VDecl->getLocation(), diag::err_objc_var_decl_inclass);
+ }
+ }
+ }
+}
+
+
+/// CvtQTToAstBitMask - Utility routine to produce an AST bitmask for
+/// Objective-C's type qualifiers from the parser version of the same info.
+static Decl::ObjCDeclQualifier
+CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
+ Decl::ObjCDeclQualifier ret = Decl::OBJC_TQ_None;
+ if (PQTVal & ObjCDeclSpec::DQ_In)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_In);
+ if (PQTVal & ObjCDeclSpec::DQ_Inout)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_Inout);
+ if (PQTVal & ObjCDeclSpec::DQ_Out)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_Out);
+ if (PQTVal & ObjCDeclSpec::DQ_Bycopy)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_Bycopy);
+ if (PQTVal & ObjCDeclSpec::DQ_Byref)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_Byref);
+ if (PQTVal & ObjCDeclSpec::DQ_Oneway)
+ ret = (Decl::ObjCDeclQualifier)(ret | Decl::OBJC_TQ_Oneway);
+
+ return ret;
+}
+
+Sema::DeclPtrTy Sema::ActOnMethodDeclaration(
+ SourceLocation MethodLoc, SourceLocation EndLoc,
+ tok::TokenKind MethodType, DeclPtrTy classDecl,
+ ObjCDeclSpec &ReturnQT, TypeTy *ReturnType,
+ Selector Sel,
+ // optional arguments. The number of types/arguments is obtained
+ // from the Sel.getNumArgs().
+ ObjCArgInfo *ArgInfo,
+ llvm::SmallVectorImpl<Declarator> &Cdecls,
+ AttributeList *AttrList, tok::ObjCKeywordKind MethodDeclKind,
+ bool isVariadic) {
+ Decl *ClassDecl = classDecl.getAs<Decl>();
+
+ // Make sure we can establish a context for the method.
+ if (!ClassDecl) {
+ Diag(MethodLoc, diag::error_missing_method_context);
+ return DeclPtrTy();
+ }
+ QualType resultDeclType;
+
+ if (ReturnType) {
+ resultDeclType = QualType::getFromOpaquePtr(ReturnType);
+
+ // Methods cannot return interface types. All ObjC objects are
+ // passed by reference.
+ if (resultDeclType->isObjCInterfaceType()) {
+ Diag(MethodLoc, diag::err_object_cannot_be_passed_returned_by_value)
+ << 0 << resultDeclType;
+ return DeclPtrTy();
+ }
+ } else // get the type for "id".
+ resultDeclType = Context.getObjCIdType();
+
+ ObjCMethodDecl* ObjCMethod =
+ ObjCMethodDecl::Create(Context, MethodLoc, EndLoc, Sel, resultDeclType,
+ cast<DeclContext>(ClassDecl),
+ MethodType == tok::minus, isVariadic,
+ false,
+ MethodDeclKind == tok::objc_optional ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+
+ llvm::SmallVector<ParmVarDecl*, 16> Params;
+
+ for (unsigned i = 0, e = Sel.getNumArgs(); i != e; ++i) {
+ QualType ArgType, UnpromotedArgType;
+
+ if (ArgInfo[i].Type == 0) {
+ UnpromotedArgType = ArgType = Context.getObjCIdType();
+ } else {
+ UnpromotedArgType = ArgType = QualType::getFromOpaquePtr(ArgInfo[i].Type);
+ // Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
+ ArgType = adjustParameterType(ArgType);
+ }
+
+ ParmVarDecl* Param;
+ if (ArgType == UnpromotedArgType)
+ Param = ParmVarDecl::Create(Context, ObjCMethod, ArgInfo[i].NameLoc,
+ ArgInfo[i].Name, ArgType,
+ VarDecl::None, 0);
+ else
+ Param = OriginalParmVarDecl::Create(Context, ObjCMethod,
+ ArgInfo[i].NameLoc,
+ ArgInfo[i].Name, ArgType,
+ UnpromotedArgType,
+ VarDecl::None, 0);
+
+ if (ArgType->isObjCInterfaceType()) {
+ Diag(ArgInfo[i].NameLoc,
+ diag::err_object_cannot_be_passed_returned_by_value)
+ << 1 << ArgType;
+ Param->setInvalidDecl();
+ }
+
+ Param->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ArgInfo[i].DeclSpec.getObjCDeclQualifier()));
+
+ // Apply the attributes to the parameter.
+ ProcessDeclAttributeList(Param, ArgInfo[i].ArgAttrs);
+
+ Params.push_back(Param);
+ }
+
+ ObjCMethod->setMethodParams(Context, Params.data(), Sel.getNumArgs());
+ ObjCMethod->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier()));
+ const ObjCMethodDecl *PrevMethod = 0;
+
+ if (AttrList)
+ ProcessDeclAttributeList(ObjCMethod, AttrList);
+
+ // For implementations (which can be very "coarse grain"), we add the
+ // method now. This allows the AST to implement lookup methods that work
+ // incrementally (without waiting until we parse the @end). It also allows
+ // us to flag multiple declaration errors as they occur.
+ if (ObjCImplementationDecl *ImpDecl =
+ dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
+ if (MethodType == tok::minus) {
+ PrevMethod = ImpDecl->getInstanceMethod(Context, Sel);
+ ImpDecl->addInstanceMethod(Context, ObjCMethod);
+ } else {
+ PrevMethod = ImpDecl->getClassMethod(Context, Sel);
+ ImpDecl->addClassMethod(Context, ObjCMethod);
+ }
+ if (AttrList)
+ Diag(EndLoc, diag::warn_attribute_method_def);
+ }
+ else if (ObjCCategoryImplDecl *CatImpDecl =
+ dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) {
+ if (MethodType == tok::minus) {
+ PrevMethod = CatImpDecl->getInstanceMethod(Context, Sel);
+ CatImpDecl->addInstanceMethod(Context, ObjCMethod);
+ } else {
+ PrevMethod = CatImpDecl->getClassMethod(Context, Sel);
+ CatImpDecl->addClassMethod(Context, ObjCMethod);
+ }
+ if (AttrList)
+ Diag(EndLoc, diag::warn_attribute_method_def);
+ }
+ if (PrevMethod) {
+ // You can never have two method definitions with the same name.
+ Diag(ObjCMethod->getLocation(), diag::err_duplicate_method_decl)
+ << ObjCMethod->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ return DeclPtrTy::make(ObjCMethod);
+}
+
+void Sema::CheckObjCPropertyAttributes(QualType PropertyTy,
+ SourceLocation Loc,
+ unsigned &Attributes) {
+ // FIXME: Improve the reported location.
+
+ // readonly and readwrite/assign/retain/copy conflict.
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
+ ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain))) {
+ const char * which = (Attributes & ObjCDeclSpec::DQ_PR_readwrite) ?
+ "readwrite" :
+ (Attributes & ObjCDeclSpec::DQ_PR_assign) ?
+ "assign" :
+ (Attributes & ObjCDeclSpec::DQ_PR_copy) ?
+ "copy" : "retain";
+
+ Diag(Loc, (Attributes & (ObjCDeclSpec::DQ_PR_readwrite)) ?
+ diag::err_objc_property_attr_mutually_exclusive :
+ diag::warn_objc_property_attr_mutually_exclusive)
+ << "readonly" << which;
+ }
+
+ // Check for copy or retain on non-object types.
+ if ((Attributes & (ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain)) &&
+ !Context.isObjCObjectPointerType(PropertyTy)) {
+ Diag(Loc, diag::err_objc_property_requires_object)
+ << (Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain");
+ Attributes &= ~(ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain);
+ }
+
+ // Check for more than one of { assign, copy, retain }.
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ }
+
+ // Warn if user supplied no assignment attribute, property is
+ // readwrite, and this is an object type.
+ if (!(Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain)) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ Context.isObjCObjectPointerType(PropertyTy)) {
+ // Skip this warning in gc-only mode.
+ if (getLangOptions().getGCMode() != LangOptions::GCOnly)
+ Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
+
+ // In non-GC code, warn that this is likely inappropriate.
+ if (getLangOptions().getGCMode() == LangOptions::NonGC)
+ Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+
+ // FIXME: Implement warning dependent on NSCopying being
+ // implemented. See also:
+ // <rdar://5168496&4855821&5607453&5096644&4947311&5698469&4947014&5168496>
+ // (please trim this list while you are at it).
+ }
+
+ if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
+ && getLangOptions().getGCMode() == LangOptions::GCOnly
+ && PropertyTy->isBlockPointerType())
+ Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
+}
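+
+// A brief illustration of the checks above (hypothetical declarations, not
+// taken from any existing code): 'readonly' combined with 'retain' triggers
+// the mutual-exclusion diagnostic, and 'copy'/'retain' on a non-object type
+// is diagnosed and the attribute dropped.
+//
+//   @interface Foo
+//   @property (readonly, retain) id a;   // readonly vs. retain conflict
+//   @property (copy) int b;              // copy requires an object type
+//   @end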
+
+Sema::DeclPtrTy Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
+ FieldDeclarator &FD,
+ ObjCDeclSpec &ODS,
+ Selector GetterSel,
+ Selector SetterSel,
+ DeclPtrTy ClassCategory,
+ bool *isOverridingProperty,
+ tok::ObjCKeywordKind MethodImplKind) {
+ unsigned Attributes = ODS.getPropertyAttributes();
+ bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
+ // default is readwrite!
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly));
+ // The property defaults to 'assign' if it is readwrite and is
+ // neither retain nor copy.
+ bool isAssign = ((Attributes & ObjCDeclSpec::DQ_PR_assign) ||
+ (isReadWrite &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_copy)));
+ QualType T = GetTypeForDeclarator(FD.D, S);
+ Decl *ClassDecl = ClassCategory.getAs<Decl>();
+ ObjCInterfaceDecl *CCPrimary = 0; // continuation class's primary class
+ // May modify Attributes.
+ CheckObjCPropertyAttributes(T, AtLoc, Attributes);
+ if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl))
+ if (!CDecl->getIdentifier()) {
+ // This is a continuation class; the property requires special
+ // handling.
+ if ((CCPrimary = CDecl->getClassInterface())) {
+ // Find the property in continuation class's primary class only.
+ ObjCPropertyDecl *PIDecl = 0;
+ IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ for (ObjCInterfaceDecl::prop_iterator
+ I = CCPrimary->prop_begin(Context),
+ E = CCPrimary->prop_end(Context);
+ I != E; ++I)
+ if ((*I)->getIdentifier() == PropertyId) {
+ PIDecl = *I;
+ break;
+ }
+
+ if (PIDecl) {
+ // The readonly attribute of property 'PIDecl' will be overridden by the
+ // continuation class's readwrite property attribute.
+ unsigned PIkind = PIDecl->getPropertyAttributes();
+ if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
+ if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) !=
+ (PIkind & ObjCPropertyDecl::OBJC_PR_nonatomic))
+ Diag(AtLoc, diag::warn_property_attr_mismatch);
+ PIDecl->makeitReadWriteAttribute();
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+ PIDecl->setSetterName(SetterSel);
+ }
+ else
+ Diag(AtLoc, diag::err_use_continuation_class)
+ << CCPrimary->getDeclName();
+ *isOverridingProperty = true;
+ // Make sure setter decl is synthesized, and added to primary
+ // class's list.
+ ProcessPropertyDecl(PIDecl, CCPrimary);
+ return DeclPtrTy();
+ }
+ // No matching property found in the primary class. Just fall through
+ // and add the property to the continuation class's primary class.
+ ClassDecl = CCPrimary;
+ } else {
+ Diag(CDecl->getLocation(), diag::err_continuation_class);
+ *isOverridingProperty = true;
+ return DeclPtrTy();
+ }
+ }
+
+ DeclContext *DC = dyn_cast<DeclContext>(ClassDecl);
+ assert(DC && "ClassDecl is not a DeclContext");
+ ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
+ FD.D.getIdentifierLoc(),
+ FD.D.getIdentifier(), T);
+ DC->addDecl(Context, PDecl);
+
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(AtLoc, diag::err_property_type) << T;
+ PDecl->setInvalidDecl();
+ }
+
+ ProcessDeclAttributes(PDecl, FD.D);
+
+ // Regardless of setter/getter attribute, we save the default getter/setter
+ // selector names in anticipation of declaration of setter/getter methods.
+ PDecl->setGetterName(GetterSel);
+ PDecl->setSetterName(SetterSel);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter);
+
+ if (isReadWrite)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+ if (MethodImplKind == tok::objc_required)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
+ else if (MethodImplKind == tok::objc_optional)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional);
+ // This is a case of a continuation class adding a new property to the
+ // class. That is not what continuation classes were meant for, but GCC
+ // supports it, so we do too. Make sure the setter/getter methods are
+ // declared here.
+ if (CCPrimary)
+ ProcessPropertyDecl(PDecl, CCPrimary);
+
+ return DeclPtrTy::make(PDecl);
+}
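+
+// The continuation-class path above covers redeclarations like the following
+// hypothetical one, where a class extension relaxes a readonly property to
+// readwrite; ProcessPropertyDecl then synthesizes the setter in the primary
+// class:
+//
+//   @interface Foo
+//   @property (readonly) id bar;
+//   @end
+//
+//   @interface Foo ()                      // continuation class
+//   @property (readwrite, retain) id bar;
+//   @end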
+
+/// ActOnPropertyImplDecl - This routine performs semantic checks and
+/// builds the AST node for a property implementation declaration; declared
+/// as @synthesize or @dynamic.
+///
+Sema::DeclPtrTy Sema::ActOnPropertyImplDecl(SourceLocation AtLoc,
+ SourceLocation PropertyLoc,
+ bool Synthesize,
+ DeclPtrTy ClassCatImpDecl,
+ IdentifierInfo *PropertyId,
+ IdentifierInfo *PropertyIvar) {
+ Decl *ClassImpDecl = ClassCatImpDecl.getAs<Decl>();
+ // Make sure we have a context for the property implementation declaration.
+ if (!ClassImpDecl) {
+ Diag(AtLoc, diag::error_missing_property_context);
+ return DeclPtrTy();
+ }
+ ObjCPropertyDecl *property = 0;
+ ObjCInterfaceDecl* IDecl = 0;
+ // Find the class or category class where this property must have
+ // a declaration.
+ ObjCImplementationDecl *IC = 0;
+ ObjCCategoryImplDecl* CatImplClass = 0;
+ if ((IC = dyn_cast<ObjCImplementationDecl>(ClassImpDecl))) {
+ IDecl = IC->getClassInterface();
+ // We always synthesize an interface for an implementation
+ // without an interface decl. So, IDecl is always non-zero.
+ assert(IDecl &&
+ "ActOnPropertyImplDecl - @implementation without @interface");
+
+ // Look for this property declaration in the @implementation's @interface
+ property = IDecl->FindPropertyDeclaration(Context, PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName();
+ return DeclPtrTy();
+ }
+ }
+ else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
+ if (Synthesize) {
+ Diag(AtLoc, diag::error_synthesize_category_decl);
+ return DeclPtrTy();
+ }
+ IDecl = CatImplClass->getClassInterface();
+ if (!IDecl) {
+ Diag(AtLoc, diag::error_missing_property_interface);
+ return DeclPtrTy();
+ }
+ ObjCCategoryDecl *Category =
+ IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier());
+
+ // If the category for this implementation was not found, it is an error
+ // that has already been reported earlier.
+ if (!Category)
+ return DeclPtrTy();
+ // Look for this property declaration in @implementation's category
+ property = Category->FindPropertyDeclaration(Context, PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_category_property_decl)
+ << Category->getDeclName();
+ return DeclPtrTy();
+ }
+ } else {
+ Diag(AtLoc, diag::error_bad_property_context);
+ return DeclPtrTy();
+ }
+ ObjCIvarDecl *Ivar = 0;
+ // Check that we have a valid, previously declared ivar for @synthesize
+ if (Synthesize) {
+ // @synthesize
+ if (!PropertyIvar)
+ PropertyIvar = PropertyId;
+ QualType PropType = Context.getCanonicalType(property->getType());
+ // Check that this is a previously declared 'ivar' in 'IDecl' interface
+ ObjCInterfaceDecl *ClassDeclared;
+ Ivar = IDecl->lookupInstanceVariable(Context, PropertyIvar, ClassDeclared);
+ if (!Ivar) {
+ Ivar = ObjCIvarDecl::Create(Context, CurContext, PropertyLoc,
+ PropertyIvar, PropType,
+ ObjCIvarDecl::Public,
+ (Expr *)0);
+ property->setPropertyIvarDecl(Ivar);
+ if (!getLangOptions().ObjCNonFragileABI)
+ Diag(PropertyLoc, diag::error_missing_property_ivar_decl) << PropertyId;
+ // Note: deliberately fall through so that we still have a property
+ // implementation and avoid future warnings.
+ }
+ else if (getLangOptions().ObjCNonFragileABI &&
+ ClassDeclared != IDecl) {
+ Diag(PropertyLoc, diag::error_ivar_in_superclass_use)
+ << property->getDeclName() << Ivar->getDeclName()
+ << ClassDeclared->getDeclName();
+ Diag(Ivar->getLocation(), diag::note_previous_access_declaration)
+ << Ivar << Ivar->getNameAsCString();
+ // Note: deliberately fall through so that more errors are caught.
+ }
+ QualType IvarType = Context.getCanonicalType(Ivar->getType());
+
+ // Check that type of property and its ivar are type compatible.
+ if (PropType != IvarType) {
+ if (CheckAssignmentConstraints(PropType, IvarType) != Compatible) {
+ Diag(PropertyLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Note: deliberately fall through so that we still have a property
+ // implementation and avoid future warnings.
+ }
+
+ // FIXME: Rules for properties are somewhat different from those
+ // for assignments. Use a new routine to consolidate all cases;
+ // specifically for property redeclarations as well as for ivars.
+ QualType lhsType =Context.getCanonicalType(PropType).getUnqualifiedType();
+ QualType rhsType =Context.getCanonicalType(IvarType).getUnqualifiedType();
+ if (lhsType != rhsType &&
+ lhsType->isArithmeticType()) {
+ Diag(PropertyLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Fall thru - see previous comment
+ }
+ // __weak is explicit, so it works on the canonical type.
+ if (PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
+ getLangOptions().getGCMode() != LangOptions::NonGC) {
+ Diag(PropertyLoc, diag::error_weak_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Fall thru - see previous comment
+ }
+ if ((Context.isObjCObjectPointerType(property->getType()) ||
+ PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
+ getLangOptions().getGCMode() != LangOptions::NonGC) {
+ Diag(PropertyLoc, diag::error_strong_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Fall thru - see previous comment
+ }
+ }
+ } else if (PropertyIvar)
+ // @dynamic
+ Diag(PropertyLoc, diag::error_dynamic_property_ivar_decl);
+ assert (property && "ActOnPropertyImplDecl - property declaration missing");
+ ObjCPropertyImplDecl *PIDecl =
+ ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc,
+ property,
+ (Synthesize ?
+ ObjCPropertyImplDecl::Synthesize
+ : ObjCPropertyImplDecl::Dynamic),
+ Ivar);
+ if (IC) {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ IC->FindPropertyImplIvarDecl(Context, PropertyIvar)) {
+ Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl
+ = IC->FindPropertyImplDecl(Context, PropertyId)) {
+ Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return DeclPtrTy();
+ }
+ IC->addPropertyImplementation(Context, PIDecl);
+ }
+ else {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplIvarDecl(Context, PropertyIvar)) {
+ Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplDecl(Context, PropertyId)) {
+ Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return DeclPtrTy();
+ }
+ CatImplClass->addPropertyImplementation(Context, PIDecl);
+ }
+
+ return DeclPtrTy::make(PIDecl);
+}
+
+bool Sema::CheckObjCDeclScope(Decl *D) {
+ if (isa<TranslationUnitDecl>(CurContext->getLookupContext()))
+ return false;
+
+ Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope);
+ D->setInvalidDecl();
+
+ return true;
+}
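+
+// For illustration (hypothetical code): an Objective-C declaration that is
+// not at translation-unit scope is rejected here, e.g. one written inside a
+// function body:
+//
+//   void f(void) {
+//     @interface Bar @end   // error: Objective-C declarations may only
+//   }                       //        appear in global scope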
+
+/// Collect the instance variables declared in an Objective-C object. Used in
+/// the creation of structures from objects using the @defs directive.
+/// FIXME: This should be consolidated with CollectObjCIvars as it is also
+/// part of the AST generation logic of @defs.
+static void CollectIvars(ObjCInterfaceDecl *Class, RecordDecl *Record,
+ ASTContext& Ctx,
+ llvm::SmallVectorImpl<Sema::DeclPtrTy> &ivars) {
+ if (Class->getSuperClass())
+ CollectIvars(Class->getSuperClass(), Record, Ctx, ivars);
+
+ // For each ivar, create a fresh ObjCAtDefsFieldDecl.
+ for (ObjCInterfaceDecl::ivar_iterator I = Class->ivar_begin(),
+ E = Class->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl* ID = *I;
+ Decl *FD = ObjCAtDefsFieldDecl::Create(Ctx, Record, ID->getLocation(),
+ ID->getIdentifier(), ID->getType(),
+ ID->getBitWidth());
+ ivars.push_back(Sema::DeclPtrTy::make(FD));
+ }
+}
+
+/// Called whenever @defs(ClassName) is encountered in the source. Inserts the
+/// instance variables of ClassName into Decls.
+void Sema::ActOnDefs(Scope *S, DeclPtrTy TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ llvm::SmallVectorImpl<DeclPtrTy> &Decls) {
+ // Check that ClassName is a valid class
+ ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName);
+ if (!Class) {
+ Diag(DeclStart, diag::err_undef_interface) << ClassName;
+ return;
+ }
+ if (LangOpts.ObjCNonFragileABI) {
+ Diag(DeclStart, diag::err_atdef_nonfragile_interface);
+ return;
+ }
+
+ // Collect the instance variables
+ CollectIvars(Class, dyn_cast<RecordDecl>(TagD.getAs<Decl>()), Context, Decls);
+
+ // Introduce all of these fields into the appropriate scope.
+ for (llvm::SmallVectorImpl<DeclPtrTy>::iterator D = Decls.begin();
+ D != Decls.end(); ++D) {
+ FieldDecl *FD = cast<FieldDecl>(D->getAs<Decl>());
+ if (getLangOptions().CPlusPlus)
+ PushOnScopeChains(cast<FieldDecl>(FD), S);
+ else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD.getAs<Decl>()))
+ Record->addDecl(Context, FD);
+ }
+}
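+
+// For illustration, a hypothetical use of the (fragile-ABI-only) @defs
+// extension that the code above expands: the struct receives field decls
+// mirroring Foo's instance variables, including inherited ones.
+//
+//   @interface Foo : NSObject { int x; double y; }
+//   @end
+//
+//   struct FooLayout {
+//     @defs(Foo);
+//   };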
+
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
new file mode 100644
index 0000000..ee5132a
--- /dev/null
+++ b/lib/Sema/SemaExpr.cpp
@@ -0,0 +1,5395 @@
+//===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Designator.h"
+#include "clang/Parse/Scope.h"
+using namespace clang;
+
+/// \brief Determine whether the use of this declaration is valid, and
+/// emit any corresponding diagnostics.
+///
+/// This routine diagnoses various problems with referencing
+/// declarations that can occur when using a declaration. For example,
+/// it might warn if a deprecated or unavailable declaration is being
+/// used, or produce an error (and return true) if a C++0x deleted
+/// function is being used.
+///
+/// \returns true if there was an error (this declaration cannot be
+/// referenced), false otherwise.
+bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc) {
+ // See if the decl is deprecated.
+ if (D->getAttr<DeprecatedAttr>()) {
+ // Implementing deprecated stuff requires referencing deprecated
+ // stuff. Don't warn if we are implementing a deprecated
+ // construct.
+ bool isSilenced = false;
+
+ if (NamedDecl *ND = getCurFunctionOrMethodDecl()) {
+ // If this reference happens *in* a deprecated function or method, don't
+ // warn.
+ isSilenced = ND->getAttr<DeprecatedAttr>();
+
+ // If this is an Objective-C method implementation, check to see if the
+ // method was deprecated on the declaration, not the definition.
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND)) {
+ // The semantic decl context of a ObjCMethodDecl is the
+ // ObjCImplementationDecl.
+ if (ObjCImplementationDecl *Impl
+ = dyn_cast<ObjCImplementationDecl>(MD->getParent())) {
+
+ MD = Impl->getClassInterface()->getMethod(Context,
+ MD->getSelector(),
+ MD->isInstanceMethod());
+ isSilenced |= MD && MD->getAttr<DeprecatedAttr>();
+ }
+ }
+ }
+
+ if (!isSilenced)
+ Diag(Loc, diag::warn_deprecated) << D->getDeclName();
+ }
+
+ // See if this is a deleted function.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDeleted()) {
+ Diag(Loc, diag::err_deleted_function_use);
+ Diag(D->getLocation(), diag::note_unavailable_here) << true;
+ return true;
+ }
+ }
+
+ // See if the decl is unavailable
+ if (D->getAttr<UnavailableAttr>()) {
+ Diag(Loc, diag::warn_unavailable) << D->getDeclName();
+ Diag(D->getLocation(), diag::note_unavailable_here) << 0;
+ }
+
+ return false;
+}
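+
+// For illustration, the attribute checks above fire on uses like these
+// (hypothetical declarations):
+//
+//   void old_api(void) __attribute__((deprecated));
+//   void gone_api(void) __attribute__((unavailable));
+//
+//   void caller(void) {
+//     old_api();    // warning: 'old_api' is deprecated
+//     gone_api();   // warning: 'gone_api' is unavailable
+//   }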
+
+/// DiagnoseSentinelCalls - This routine checks method dispatch calls (and,
+/// in the future, other calls) that have been declared with the sentinel
+/// attribute. It warns if the call does not pass the sentinel argument.
+///
+void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ Expr **Args, unsigned NumArgs)
+{
+ const SentinelAttr *attr = D->getAttr<SentinelAttr>();
+ if (!attr)
+ return;
+ int sentinelPos = attr->getSentinel();
+ int nullPos = attr->getNullPos();
+
+ // FIXME: ObjCMethodDecl and FunctionDecl should derive from a common base
+ // class; then we wouldn't need two versions of the same code.
+ unsigned int i = 0;
+ bool warnNotEnoughArgs = false;
+ int isMethod = 0;
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // skip over named parameters.
+ ObjCMethodDecl::param_iterator P, E = MD->param_end();
+ for (P = MD->param_begin(); (P != E && i < NumArgs); ++P) {
+ if (nullPos)
+ --nullPos;
+ else
+ ++i;
+ }
+ warnNotEnoughArgs = (P != E || i >= NumArgs);
+ isMethod = 1;
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // skip over named parameters.
+ ObjCMethodDecl::param_iterator P, E = FD->param_end();
+ for (P = FD->param_begin(); (P != E && i < NumArgs); ++P) {
+ if (nullPos)
+ --nullPos;
+ else
+ ++i;
+ }
+ warnNotEnoughArgs = (P != E || i >= NumArgs);
+ }
+ else if (VarDecl *V = dyn_cast<VarDecl>(D)) {
+ // block or function pointer call.
+ QualType Ty = V->getType();
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
+ const FunctionType *FT = Ty->isFunctionPointerType()
+ ? Ty->getAsPointerType()->getPointeeType()->getAsFunctionType()
+ : Ty->getAsBlockPointerType()->getPointeeType()->getAsFunctionType();
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT)) {
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ unsigned k;
+ for (k = 0; (k != NumArgsInProto && i < NumArgs); k++) {
+ if (nullPos)
+ --nullPos;
+ else
+ ++i;
+ }
+ warnNotEnoughArgs = (k != NumArgsInProto || i >= NumArgs);
+ }
+ if (Ty->isBlockPointerType())
+ isMethod = 2;
+ }
+ else
+ return;
+ }
+ else
+ return;
+
+ if (warnNotEnoughArgs) {
+ Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName();
+ Diag(D->getLocation(), diag::note_sentinel_here) << isMethod;
+ return;
+ }
+ int sentinel = i;
+ while (sentinelPos > 0 && i < NumArgs-1) {
+ --sentinelPos;
+ ++i;
+ }
+ if (sentinelPos > 0) {
+ Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName();
+ Diag(D->getLocation(), diag::note_sentinel_here) << isMethod;
+ return;
+ }
+ while (i < NumArgs-1) {
+ ++i;
+ ++sentinel;
+ }
+ Expr *sentinelExpr = Args[sentinel];
+ if (sentinelExpr && (!sentinelExpr->getType()->isPointerType() ||
+ !sentinelExpr->isNullPointerConstant(Context))) {
+ Diag(Loc, diag::warn_missing_sentinel) << isMethod;
+ Diag(D->getLocation(), diag::note_sentinel_here) << isMethod;
+ }
+ return;
+}
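+
+// For illustration, a hypothetical variadic function carrying the sentinel
+// attribute; the last argument must be a null pointer constant, otherwise the
+// warning above is emitted:
+//
+//   void add_items(const char *first, ...) __attribute__((sentinel));
+//
+//   add_items("a", "b", NULL);   // ok
+//   add_items("a", "b");         // warning: missing sentinel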
+
+SourceRange Sema::getExprRange(ExprTy *E) const {
+ Expr *Ex = (Expr *)E;
+ return Ex? Ex->getSourceRange() : SourceRange();
+}
+
+//===----------------------------------------------------------------------===//
+// Standard Promotions and Conversions
+//===----------------------------------------------------------------------===//
+
+/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
+void Sema::DefaultFunctionArrayConversion(Expr *&E) {
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type");
+
+ if (Ty->isFunctionType())
+ ImpCastExprToType(E, Context.getPointerType(Ty));
+ else if (Ty->isArrayType()) {
+ // In C90 mode, arrays only promote to pointers if the array expression is
+ // an lvalue. The relevant legalese is C90 6.2.2.1p3: "an lvalue that has
+ // type 'array of type' is converted to an expression that has type 'pointer
+ // to type'...". In C99 this was changed to: C99 6.3.2.1p3: "an expression
+ // that has type 'array of type' ...". The relevant change is "an lvalue"
+ // (C90) to "an expression" (C99).
+ //
+ // C++ 4.2p1:
+ // An lvalue or rvalue of type "array of N T" or "array of unknown bound of
+ // T" can be converted to an rvalue of type "pointer to T".
+ //
+ if (getLangOptions().C99 || getLangOptions().CPlusPlus ||
+ E->isLvalue(Context) == Expr::LV_Valid)
+ ImpCastExprToType(E, Context.getArrayDecayedType(Ty));
+ }
+}
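+
+// For illustration: given 'int a[10];', the expression 'a + 1' first decays
+// 'a' from 'int[10]' to 'int *' via the conversion above; likewise a function
+// designator used as a value decays to a pointer to the function.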
+
+/// \brief Whether this is a promotable bitfield reference according
+/// to C99 6.3.1.1p2, bullet 2.
+///
+/// \returns the type this bit-field will promote to, or NULL if no
+/// promotion occurs.
+static QualType isPromotableBitField(Expr *E, ASTContext &Context) {
+ FieldDecl *Field = E->getBitField();
+ if (!Field)
+ return QualType();
+
+ const BuiltinType *BT = Field->getType()->getAsBuiltinType();
+ if (!BT)
+ return QualType();
+
+ if (BT->getKind() != BuiltinType::Bool &&
+ BT->getKind() != BuiltinType::Int &&
+ BT->getKind() != BuiltinType::UInt)
+ return QualType();
+
+ llvm::APSInt BitWidthAP;
+ if (!Field->getBitWidth()->isIntegerConstantExpr(BitWidthAP, Context))
+ return QualType();
+
+ uint64_t BitWidth = BitWidthAP.getZExtValue();
+ uint64_t IntSize = Context.getTypeSize(Context.IntTy);
+ if (BitWidth < IntSize ||
+ (Field->getType()->isSignedIntegerType() && BitWidth == IntSize))
+ return Context.IntTy;
+
+ if (BitWidth == IntSize && Field->getType()->isUnsignedIntegerType())
+ return Context.UnsignedIntTy;
+
+ return QualType();
+}
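+
+// For illustration (hypothetical struct): given
+//
+//   struct S { unsigned int bits : 7; } s;
+//
+// 's.bits' is a promotable bit-field; its width (7) is less than the width of
+// int, so it promotes to 'int' under the rule above.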
+
+/// UsualUnaryConversions - Performs various conversions that are common to most
+/// operators (C99 6.3). The conversions of array and function types are
+/// sometimes suppressed. For example, the array->pointer conversion doesn't
+/// apply if the array is an argument to the sizeof or address (&) operators.
+/// In these instances, this routine should *not* be called.
+Expr *Sema::UsualUnaryConversions(Expr *&Expr) {
+ QualType Ty = Expr->getType();
+ assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
+
+ // C99 6.3.1.1p2:
+ //
+ // The following may be used in an expression wherever an int or
+ // unsigned int may be used:
+ // - an object or expression with an integer type whose integer
+ // conversion rank is less than or equal to the rank of int
+ // and unsigned int.
+ // - A bit-field of type _Bool, int, signed int, or unsigned int.
+ //
+ // If an int can represent all values of the original type, the
+ // value is converted to an int; otherwise, it is converted to an
+ // unsigned int. These are called the integer promotions. All
+ // other types are unchanged by the integer promotions.
+ if (Ty->isPromotableIntegerType()) {
+ ImpCastExprToType(Expr, Context.IntTy);
+ return Expr;
+ } else {
+ QualType T = isPromotableBitField(Expr, Context);
+ if (!T.isNull()) {
+ ImpCastExprToType(Expr, T);
+ return Expr;
+ }
+ }
+
+ DefaultFunctionArrayConversion(Expr);
+ return Expr;
+}
+
+/// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
+/// do not have a prototype. Arguments that have type float are promoted to
+/// double. All other argument types are converted by UsualUnaryConversions().
+void Sema::DefaultArgumentPromotion(Expr *&Expr) {
+ QualType Ty = Expr->getType();
+ assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type");
+
+ // If this is a 'float' (CVR qualified or typedef) promote to double.
+ if (const BuiltinType *BT = Ty->getAsBuiltinType())
+ if (BT->getKind() == BuiltinType::Float)
+ return ImpCastExprToType(Expr, Context.DoubleTy);
+
+ UsualUnaryConversions(Expr);
+}
+
+/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
+/// will warn if the resulting type is not a POD type, and rejects ObjC
+/// interfaces passed by value. This returns true if the argument type is
+/// completely illegal.
+bool Sema::DefaultVariadicArgumentPromotion(Expr *&Expr, VariadicCallType CT) {
+ DefaultArgumentPromotion(Expr);
+
+ if (Expr->getType()->isObjCInterfaceType()) {
+ Diag(Expr->getLocStart(),
+ diag::err_cannot_pass_objc_interface_to_vararg)
+ << Expr->getType() << CT;
+ return true;
+ }
+
+ if (!Expr->getType()->isPODType())
+ Diag(Expr->getLocStart(), diag::warn_cannot_pass_non_pod_arg_to_vararg)
+ << Expr->getType() << CT;
+
+ return false;
+}
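+
+// For illustration: in a variadic call such as 'printf("%f", 1.0f)' the float
+// argument is promoted to double by the default argument promotions above,
+// while passing an Objective-C object (rather than a pointer to one) through
+// '...' is rejected outright.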
+
+
+/// UsualArithmeticConversions - Performs various conversions that are common to
+/// binary operators (C99 6.3.1.8). If both operands aren't arithmetic, this
+/// routine returns the first non-arithmetic type found. The client is
+/// responsible for emitting appropriate error diagnostics.
+/// FIXME: verify the conversion rules for "complex int" are consistent with
+/// GCC.
+QualType Sema::UsualArithmeticConversions(Expr *&lhsExpr, Expr *&rhsExpr,
+ bool isCompAssign) {
+ if (!isCompAssign)
+ UsualUnaryConversions(lhsExpr);
+
+ UsualUnaryConversions(rhsExpr);
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType lhs =
+ Context.getCanonicalType(lhsExpr->getType()).getUnqualifiedType();
+ QualType rhs =
+ Context.getCanonicalType(rhsExpr->getType()).getUnqualifiedType();
+
+ // If both types are identical, no conversion is needed.
+ if (lhs == rhs)
+ return lhs;
+
+ // If either side is a non-arithmetic type (e.g. a pointer), we are done.
+ // The caller can deal with this (e.g. pointer + int).
+ if (!lhs->isArithmeticType() || !rhs->isArithmeticType())
+ return lhs;
+
+ // Perform bitfield promotions.
+ QualType LHSBitfieldPromoteTy = isPromotableBitField(lhsExpr, Context);
+ if (!LHSBitfieldPromoteTy.isNull())
+ lhs = LHSBitfieldPromoteTy;
+ QualType RHSBitfieldPromoteTy = isPromotableBitField(rhsExpr, Context);
+ if (!RHSBitfieldPromoteTy.isNull())
+ rhs = RHSBitfieldPromoteTy;
+
+ QualType destType = UsualArithmeticConversionsType(lhs, rhs);
+ if (!isCompAssign)
+ ImpCastExprToType(lhsExpr, destType);
+ ImpCastExprToType(rhsExpr, destType);
+ return destType;
+}
+
+QualType Sema::UsualArithmeticConversionsType(QualType lhs, QualType rhs) {
+ // Perform the usual unary conversions. We do this early so that
+ // integral promotions to "int" can allow us to exit early, in the
+ // lhs == rhs check. Also, for conversion purposes, we ignore any
+ // qualifiers. For example, "const float" and "float" are
+ // equivalent.
+ if (lhs->isPromotableIntegerType())
+ lhs = Context.IntTy;
+ else
+ lhs = lhs.getUnqualifiedType();
+ if (rhs->isPromotableIntegerType())
+ rhs = Context.IntTy;
+ else
+ rhs = rhs.getUnqualifiedType();
+
+ // If both types are identical, no conversion is needed.
+ if (lhs == rhs)
+ return lhs;
+
+ // If either side is a non-arithmetic type (e.g. a pointer), we are done.
+ // The caller can deal with this (e.g. pointer + int).
+ if (!lhs->isArithmeticType() || !rhs->isArithmeticType())
+ return lhs;
+
+ // At this point, we have two different arithmetic types.
+
+ // Handle complex types first (C99 6.3.1.8p1).
+ if (lhs->isComplexType() || rhs->isComplexType()) {
+ // if we have an integer operand, the result is the complex type.
+ if (rhs->isIntegerType() || rhs->isComplexIntegerType()) {
+ // convert the rhs to the lhs complex type.
+ return lhs;
+ }
+ if (lhs->isIntegerType() || lhs->isComplexIntegerType()) {
+ // convert the lhs to the rhs complex type.
+ return rhs;
+ }
+ // This handles complex/complex, complex/float, or float/complex.
+ // When both operands are complex, the shorter operand is converted to the
+ // type of the longer, and that is the type of the result. This corresponds
+ // to what is done when combining two real floating-point operands.
+ // The fun begins when size promotion occurs across type domains.
+ // From H&S 6.3.4: When one operand is complex and the other is a real
+ // floating-point type, the less precise type is converted, within its
+ // real or complex domain, to the precision of the other type. For example,
+ // when combining a "long double" with a "double _Complex", the
+ // "double _Complex" is promoted to "long double _Complex".
+ int result = Context.getFloatingTypeOrder(lhs, rhs);
+
+ if (result > 0) { // The left side is bigger, convert rhs.
+ rhs = Context.getFloatingTypeOfSizeWithinDomain(lhs, rhs);
+ } else if (result < 0) { // The right side is bigger, convert lhs.
+ lhs = Context.getFloatingTypeOfSizeWithinDomain(rhs, lhs);
+ }
+ // At this point, lhs and rhs have the same rank/size. Now, make sure the
+ // domains match. This is a requirement of our implementation; C99
+ // does not require this promotion.
+ if (lhs != rhs) { // Domains don't match, we have complex/float mix.
+ if (lhs->isRealFloatingType()) { // handle "double, _Complex double".
+ return rhs;
+ } else { // handle "_Complex double, double".
+ return lhs;
+ }
+ }
+ return lhs; // The domain/size match exactly.
+ }
+ // Now handle "real" floating types (i.e. float, double, long double).
+ if (lhs->isRealFloatingType() || rhs->isRealFloatingType()) {
+ // if we have an integer operand, the result is the real floating type.
+ if (rhs->isIntegerType()) {
+ // convert rhs to the lhs floating point type.
+ return lhs;
+ }
+ if (rhs->isComplexIntegerType()) {
+ // convert rhs to the complex floating point type.
+ return Context.getComplexType(lhs);
+ }
+ if (lhs->isIntegerType()) {
+ // convert lhs to the rhs floating point type.
+ return rhs;
+ }
+ if (lhs->isComplexIntegerType()) {
+ // convert lhs to the complex floating point type.
+ return Context.getComplexType(rhs);
+ }
+ // We have two real floating types; float/complex combos were handled above.
+ // Convert the smaller operand to the bigger result.
+ int result = Context.getFloatingTypeOrder(lhs, rhs);
+ if (result > 0) // convert the rhs
+ return lhs;
+ assert(result < 0 && "illegal float comparison");
+ return rhs; // convert the lhs
+ }
+ if (lhs->isComplexIntegerType() || rhs->isComplexIntegerType()) {
+ // Handle GCC complex int extension.
+ const ComplexType *lhsComplexInt = lhs->getAsComplexIntegerType();
+ const ComplexType *rhsComplexInt = rhs->getAsComplexIntegerType();
+
+ if (lhsComplexInt && rhsComplexInt) {
+ if (Context.getIntegerTypeOrder(lhsComplexInt->getElementType(),
+ rhsComplexInt->getElementType()) >= 0)
+ return lhs; // convert the rhs
+ return rhs;
+ } else if (lhsComplexInt && rhs->isIntegerType()) {
+ // convert the rhs to the lhs complex type.
+ return lhs;
+ } else if (rhsComplexInt && lhs->isIntegerType()) {
+ // convert the lhs to the rhs complex type.
+ return rhs;
+ }
+ }
+ // Finally, we have two differing integer types.
+ // The rules for this case are in C99 6.3.1.8
+ int compare = Context.getIntegerTypeOrder(lhs, rhs);
+ bool lhsSigned = lhs->isSignedIntegerType(),
+ rhsSigned = rhs->isSignedIntegerType();
+ QualType destType;
+ if (lhsSigned == rhsSigned) {
+ // Same signedness; use the higher-ranked type
+ destType = compare >= 0 ? lhs : rhs;
+ } else if (compare != (lhsSigned ? 1 : -1)) {
+ // The unsigned type has greater than or equal rank to the
+ // signed type, so use the unsigned type
+ destType = lhsSigned ? rhs : lhs;
+ } else if (Context.getIntWidth(lhs) != Context.getIntWidth(rhs)) {
+ // The two types are different widths; if we are here, that
+ // means the signed type is larger than the unsigned type, so
+ // use the signed type.
+ destType = lhsSigned ? lhs : rhs;
+ } else {
+ // The signed type is higher-ranked than the unsigned type,
+ // but isn't actually any bigger (like unsigned int and long
+ // on most 32-bit systems). Use the unsigned type corresponding
+ // to the signed type.
+ destType = Context.getCorrespondingUnsignedType(lhsSigned ? lhs : rhs);
+ }
+ return destType;
+}
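+
+// For illustration, the integer rules above yield, on a typical ILP32 target:
+//
+//   int       + unsigned int -> unsigned int   (unsigned rank >= signed rank)
+//   long      + unsigned int -> unsigned long  (same width; use the unsigned
+//                                               type corresponding to 'long')
+//   long long + unsigned int -> long long      (the signed type is wider)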
+
+//===----------------------------------------------------------------------===//
+// Semantic Analysis for various Expression Types
+//===----------------------------------------------------------------------===//
+
+
+/// ActOnStringLiteral - The specified tokens were lexed as pasted string
+/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
+/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
+/// multiple tokens. However, the common case is that StringToks points to one
+/// string.
+///
+Action::OwningExprResult
+Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) {
+ assert(NumStringToks && "Must have at least one string!");
+
+ StringLiteralParser Literal(StringToks, NumStringToks, PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ llvm::SmallVector<SourceLocation, 4> StringTokLocs;
+ for (unsigned i = 0; i != NumStringToks; ++i)
+ StringTokLocs.push_back(StringToks[i].getLocation());
+
+ QualType StrTy = Context.CharTy;
+ if (Literal.AnyWide) StrTy = Context.getWCharType();
+ if (Literal.Pascal) StrTy = Context.UnsignedCharTy;
+
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOptions().CPlusPlus)
+ StrTy.addConst();
+
+ // Get an array type for the string, according to C99 6.4.5. This includes
+ // the nul terminator character as well as the string length for pascal
+ // strings.
+ StrTy = Context.getConstantArrayType(StrTy,
+ llvm::APInt(32, Literal.GetNumStringChars()+1),
+ ArrayType::Normal, 0);
+
+ // Pass &StringTokLocs[0], StringTokLocs.size() to factory!
+ return Owned(StringLiteral::Create(Context, Literal.GetString(),
+ Literal.GetStringLength(),
+ Literal.AnyWide, StrTy,
+ &StringTokLocs[0],
+ StringTokLocs.size()));
+}
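+
+// For illustration: the adjacent tokens in '"foo" "bar"' are concatenated into
+// a single StringLiteral of type 'char[7]' (six characters plus the nul
+// terminator); in C++ the element type is 'const char' instead.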
+
+/// ShouldSnapshotBlockValueReference - Return true if a reference inside of
+/// CurBlock to VD should cause it to be snapshotted (as we do for auto
+/// variables defined outside the block) or false if this is not needed (e.g.
+/// for values inside the block or for globals).
+///
+/// This also keeps the 'hasBlockDeclRefExprs' in the BlockSemaInfo records
+/// up-to-date.
+///
+static bool ShouldSnapshotBlockValueReference(BlockSemaInfo *CurBlock,
+ ValueDecl *VD) {
+ // If the value is defined inside the block, we couldn't snapshot it even if
+ // we wanted to.
+ if (CurBlock->TheDecl == VD->getDeclContext())
+ return false;
+
+ // If this is an enum constant or function, it is constant, don't snapshot.
+ if (isa<EnumConstantDecl>(VD) || isa<FunctionDecl>(VD))
+ return false;
+
+ // If this is a reference to an extern, static, or global variable, no need to
+ // snapshot it.
+ // FIXME: What about 'const' variables in C++?
+ if (const VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!Var->hasLocalStorage())
+ return false;
+
+ // Blocks that have these can't be constant.
+ CurBlock->hasBlockDeclRefExprs = true;
+
+ // If we have nested blocks, the decl may be declared in an outer block (in
+ // which case that outer block doesn't get "hasBlockDeclRefExprs") or it may
+ // be defined outside all of the current blocks (in which case the blocks do
+ // all get the bit). Walk the nesting chain.
+ for (BlockSemaInfo *NextBlock = CurBlock->PrevBlockInfo; NextBlock;
+ NextBlock = NextBlock->PrevBlockInfo) {
+ // If we found the defining block for the variable, don't mark the block as
+ // having a reference outside it.
+ if (NextBlock->TheDecl == VD->getDeclContext())
+ break;
+
+ // Otherwise, the DeclRef from the inner block causes the outer one to need
+ // a snapshot as well.
+ NextBlock->hasBlockDeclRefExprs = true;
+ }
+
+ return true;
+}
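+
+// For illustration (hypothetical code): in
+//
+//   int x = 1;
+//   int (^blk)(void) = ^{ return x; };
+//
+// 'x' is an auto variable declared outside the block, so the reference is
+// snapshotted and the enclosing BlockSemaInfo gets hasBlockDeclRefExprs set.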
+
+
+
+/// ActOnIdentifierExpr - The parser read an identifier in expression context,
+/// validate it per C99 6.5.1. HasTrailingLParen indicates whether this
+/// identifier is used in a function call context.
+/// SS is only used for a C++ qualified-id (foo::bar) to indicate the
+/// class or namespace that the identifier must be a member of.
+Sema::OwningExprResult Sema::ActOnIdentifierExpr(Scope *S, SourceLocation Loc,
+ IdentifierInfo &II,
+ bool HasTrailingLParen,
+ const CXXScopeSpec *SS,
+ bool isAddressOfOperand) {
+ return ActOnDeclarationNameExpr(S, Loc, &II, HasTrailingLParen, SS,
+ isAddressOfOperand);
+}
+
+/// BuildDeclRefExpr - Build either a DeclRefExpr or a
+/// QualifiedDeclRefExpr based on whether or not SS is a
+/// nested-name-specifier.
+DeclRefExpr *
+Sema::BuildDeclRefExpr(NamedDecl *D, QualType Ty, SourceLocation Loc,
+ bool TypeDependent, bool ValueDependent,
+ const CXXScopeSpec *SS) {
+ if (SS && !SS->isEmpty()) {
+ return new (Context) QualifiedDeclRefExpr(D, Ty, Loc, TypeDependent,
+ ValueDependent, SS->getRange(),
+ static_cast<NestedNameSpecifier *>(SS->getScopeRep()));
+ } else
+ return new (Context) DeclRefExpr(D, Ty, Loc, TypeDependent, ValueDependent);
+}
+
+/// getObjectForAnonymousRecordDecl - Retrieve the (unnamed) field or
+/// variable corresponding to the anonymous union or struct whose type
+/// is Record.
+static Decl *getObjectForAnonymousRecordDecl(ASTContext &Context,
+ RecordDecl *Record) {
+ assert(Record->isAnonymousStructOrUnion() &&
+ "Record must be an anonymous struct or union!");
+
+ // FIXME: Once Decls are directly linked together, this will be an O(1)
+ // operation rather than a slow walk through DeclContext's vector (which
+ // itself will be eliminated). DeclGroups might make this even better.
+ DeclContext *Ctx = Record->getDeclContext();
+ for (DeclContext::decl_iterator D = Ctx->decls_begin(Context),
+ DEnd = Ctx->decls_end(Context);
+ D != DEnd; ++D) {
+ if (*D == Record) {
+ // The object for the anonymous struct/union directly
+ // follows its type in the list of declarations.
+ ++D;
+ assert(D != DEnd && "Missing object for anonymous record");
+ assert(!cast<NamedDecl>(*D)->getDeclName() && "Decl should be unnamed");
+ return *D;
+ }
+ }
+
+ assert(false && "Missing object for anonymous record");
+ return 0;
+}
+
+/// \brief Given a field that represents a member of an anonymous
+/// struct/union, build the path from that field's context to the
+/// actual member.
+///
+/// Construct the sequence of field member references we'll have to
+/// perform to get to the field in the anonymous union/struct. The
+/// list of members is built from the field outward, so traverse it
+/// backwards to go from an object in the current context to the field
+/// we found.
+///
+/// \returns The variable from which the field access should begin,
+/// for an anonymous struct/union that is not a member of another
+/// class. Otherwise, returns NULL.
+VarDecl *Sema::BuildAnonymousStructUnionMemberPath(FieldDecl *Field,
+ llvm::SmallVectorImpl<FieldDecl *> &Path) {
+ assert(Field->getDeclContext()->isRecord() &&
+ cast<RecordDecl>(Field->getDeclContext())->isAnonymousStructOrUnion()
+ && "Field must be stored inside an anonymous struct or union");
+
+ Path.push_back(Field);
+ VarDecl *BaseObject = 0;
+ DeclContext *Ctx = Field->getDeclContext();
+ do {
+ RecordDecl *Record = cast<RecordDecl>(Ctx);
+ Decl *AnonObject = getObjectForAnonymousRecordDecl(Context, Record);
+ if (FieldDecl *AnonField = dyn_cast<FieldDecl>(AnonObject))
+ Path.push_back(AnonField);
+ else {
+ BaseObject = cast<VarDecl>(AnonObject);
+ break;
+ }
+ Ctx = Ctx->getParent();
+ } while (Ctx->isRecord() &&
+ cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion());
+
+ return BaseObject;
+}
+
+Sema::OwningExprResult
+Sema::BuildAnonymousStructUnionMemberReference(SourceLocation Loc,
+ FieldDecl *Field,
+ Expr *BaseObjectExpr,
+ SourceLocation OpLoc) {
+ llvm::SmallVector<FieldDecl *, 4> AnonFields;
+ VarDecl *BaseObject = BuildAnonymousStructUnionMemberPath(Field,
+ AnonFields);
+
+ // Build the expression that refers to the base object, from
+ // which we will build a sequence of member references to each
+ // of the anonymous union objects and, eventually, the field we
+ // found via name lookup.
+ bool BaseObjectIsPointer = false;
+ unsigned ExtraQuals = 0;
+ if (BaseObject) {
+ // BaseObject is an anonymous struct/union variable (and is,
+ // therefore, not part of another non-anonymous record).
+ if (BaseObjectExpr) BaseObjectExpr->Destroy(Context);
+ BaseObjectExpr = new (Context) DeclRefExpr(BaseObject,BaseObject->getType(),
+ SourceLocation());
+ ExtraQuals
+ = Context.getCanonicalType(BaseObject->getType()).getCVRQualifiers();
+ } else if (BaseObjectExpr) {
+ // The caller provided the base object expression. Determine
+ // whether it's a pointer and whether it adds any qualifiers to the
+ // anonymous struct/union fields we're looking into.
+ QualType ObjectType = BaseObjectExpr->getType();
+ if (const PointerType *ObjectPtr = ObjectType->getAsPointerType()) {
+ BaseObjectIsPointer = true;
+ ObjectType = ObjectPtr->getPointeeType();
+ }
+ ExtraQuals = Context.getCanonicalType(ObjectType).getCVRQualifiers();
+ } else {
+ // We've found a member of an anonymous struct/union that is
+ // inside a non-anonymous struct/union, so in a well-formed
+ // program our base object expression is "this".
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) {
+ if (!MD->isStatic()) {
+ QualType AnonFieldType
+ = Context.getTagDeclType(
+ cast<RecordDecl>(AnonFields.back()->getDeclContext()));
+ QualType ThisType = Context.getTagDeclType(MD->getParent());
+ if ((Context.getCanonicalType(AnonFieldType)
+ == Context.getCanonicalType(ThisType)) ||
+ IsDerivedFrom(ThisType, AnonFieldType)) {
+ // Our base object expression is "this".
+ BaseObjectExpr = new (Context) CXXThisExpr(SourceLocation(),
+ MD->getThisType(Context));
+ BaseObjectIsPointer = true;
+ }
+ } else {
+ return ExprError(Diag(Loc,diag::err_invalid_member_use_in_static_method)
+ << Field->getDeclName());
+ }
+ ExtraQuals = MD->getTypeQualifiers();
+ }
+
+ if (!BaseObjectExpr)
+ return ExprError(Diag(Loc, diag::err_invalid_non_static_member_use)
+ << Field->getDeclName());
+ }
+
+ // Build the implicit member references to the field of the
+ // anonymous struct/union.
+ Expr *Result = BaseObjectExpr;
+ for (llvm::SmallVector<FieldDecl *, 4>::reverse_iterator
+ FI = AnonFields.rbegin(), FIEnd = AnonFields.rend();
+ FI != FIEnd; ++FI) {
+ QualType MemberType = (*FI)->getType();
+ if (!(*FI)->isMutable()) {
+ unsigned combinedQualifiers
+ = MemberType.getCVRQualifiers() | ExtraQuals;
+ MemberType = MemberType.getQualifiedType(combinedQualifiers);
+ }
+ Result = new (Context) MemberExpr(Result, BaseObjectIsPointer, *FI,
+ OpLoc, MemberType);
+ BaseObjectIsPointer = false;
+ ExtraQuals = Context.getCanonicalType(MemberType).getCVRQualifiers();
+ }
+
+ return Owned(Result);
+}
+
+/// ActOnDeclarationNameExpr - The parser has read some kind of name
+/// (e.g., a C++ id-expression (C++ [expr.prim]p1)). This routine
+/// performs lookup on that name and returns an expression that refers
+/// to that name. This routine isn't directly called from the parser,
+/// because the parser doesn't know about DeclarationName. Rather,
+/// this routine is called by ActOnIdentifierExpr,
+/// ActOnOperatorFunctionIdExpr, and ActOnConversionFunctionExpr,
+/// which form the DeclarationName from the corresponding syntactic
+/// forms.
+///
+/// HasTrailingLParen indicates whether this identifier is used in a
+/// function call context. LookupCtx is only used for a C++
+/// qualified-id (foo::bar) to indicate the class or namespace that
+/// the identifier must be a member of.
+///
+/// isAddressOfOperand means that this expression is the direct operand
+/// of an address-of operator. This matters because this is the only
+/// situation where a qualified name referencing a non-static member may
+/// appear outside a member function of this class.
+Sema::OwningExprResult
+Sema::ActOnDeclarationNameExpr(Scope *S, SourceLocation Loc,
+ DeclarationName Name, bool HasTrailingLParen,
+ const CXXScopeSpec *SS,
+ bool isAddressOfOperand) {
+ // Could be enum-constant, value decl, instance variable, etc.
+ if (SS && SS->isInvalid())
+ return ExprError();
+
+ // C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ // -- a nested-name-specifier that contains a class-name that
+ // names a dependent type.
+ // FIXME: Member of the current instantiation.
+ if (SS && isDependentScopeSpecifier(*SS)) {
+ return Owned(new (Context) UnresolvedDeclRefExpr(Name, Context.DependentTy,
+ Loc, SS->getRange(),
+ static_cast<NestedNameSpecifier *>(SS->getScopeRep())));
+ }
+
+ LookupResult Lookup = LookupParsedName(S, SS, Name, LookupOrdinaryName,
+ false, true, Loc);
+
+ if (Lookup.isAmbiguous()) {
+ DiagnoseAmbiguousLookup(Lookup, Name, Loc,
+ SS && SS->isSet() ? SS->getRange()
+ : SourceRange());
+ return ExprError();
+ }
+
+ NamedDecl *D = Lookup.getAsDecl();
+
+ // If this reference is in an Objective-C method, then ivar lookup happens as
+ // well.
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ if (II && getCurMethodDecl()) {
+ // There are two cases to handle here. 1) scoped lookup could have failed,
+ // in which case we should look for an ivar. 2) scoped lookup could have
+ // found a decl, but that decl is outside the current instance method (i.e.
+ // a global variable). In these two cases, we do a lookup for an ivar with
+ // this name; if the lookup succeeds, we replace our current decl with it.
+ if (D == 0 || D->isDefinedOutsideFunctionOrMethod()) {
+ ObjCInterfaceDecl *IFace = getCurMethodDecl()->getClassInterface();
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(Context, II,
+ ClassDeclared)) {
+ // Check if referencing a field with __attribute__((deprecated)).
+ if (DiagnoseUseOfDecl(IV, Loc))
+ return ExprError();
+
+ // If we're referencing an invalid decl, just return this as a silent
+ // error node. The error diagnostic was already emitted on the decl.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ bool IsClsMethod = getCurMethodDecl()->isClassMethod();
+ // If a class method attempts to use a free-standing ivar, this is
+ // an error.
+ if (IsClsMethod && D && !D->isDefinedOutsideFunctionOrMethod())
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
+ // If a class method uses a global variable, even if an ivar with the
+ // same name exists, use the global.
+ if (!IsClsMethod) {
+ if (IV->getAccessControl() == ObjCIvarDecl::Private &&
+ ClassDeclared != IFace)
+ Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName();
+ // FIXME: This should use a new expr for a direct reference; don't
+ // turn this into Self->ivar, just return a BareIVarExpr or something.
+ IdentifierInfo &II = Context.Idents.get("self");
+ OwningExprResult SelfExpr = ActOnIdentifierExpr(S, Loc, II, false);
+ return Owned(new (Context)
+ ObjCIvarRefExpr(IV, IV->getType(), Loc,
+ SelfExpr.takeAs<Expr>(), true, true));
+ }
+ }
+ }
+ else if (getCurMethodDecl()->isInstanceMethod()) {
+ // We should warn if a local variable hides an ivar.
+ ObjCInterfaceDecl *IFace = getCurMethodDecl()->getClassInterface();
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(Context, II,
+ ClassDeclared)) {
+ if (IV->getAccessControl() != ObjCIvarDecl::Private ||
+ IFace == ClassDeclared)
+ Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ }
+ }
+ // Needed to implement property "super.method" notation.
+ if (D == 0 && II->isStr("super")) {
+ QualType T;
+
+ if (getCurMethodDecl()->isInstanceMethod())
+ T = Context.getPointerType(Context.getObjCInterfaceType(
+ getCurMethodDecl()->getClassInterface()));
+ else
+ T = Context.getObjCClassType();
+ return Owned(new (Context) ObjCSuperExpr(Loc, T));
+ }
+ }
+
+ // Determine whether this name might be a candidate for
+ // argument-dependent lookup.
+ bool ADL = getLangOptions().CPlusPlus && (!SS || !SS->isSet()) &&
+ HasTrailingLParen;
+
+ if (ADL && D == 0) {
+ // We've seen something of the form
+ //
+ // identifier(
+ //
+ // and we did not find any entity by the name
+ // "identifier". However, this identifier is still subject to
+ // argument-dependent lookup, so keep track of the name.
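+ //
+ // For example (illustrative names), given
+ // 'namespace N { struct S {}; void f(S); }', a call 'f(s)' where 's' has
+ // type 'N::S' can still resolve to 'N::f' via ADL, even though unqualified
+ // lookup found nothing here.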
+ return Owned(new (Context) UnresolvedFunctionNameExpr(Name,
+ Context.OverloadTy,
+ Loc));
+ }
+
+ if (D == 0) {
+ // Otherwise, this could be an implicitly declared function reference (legal
+ // in C90, extension in C99).
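+ //
+ // For example, in C90 a call 'foo(1)' with no visible declaration of
+ // 'foo' implicitly declares it as 'extern int foo();'.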
+ if (HasTrailingLParen && II &&
+ !getLangOptions().CPlusPlus) // Not in C++.
+ D = ImplicitlyDefineFunction(Loc, *II, S);
+ else {
+ // If this name wasn't predeclared and if this is not a function call,
+ // diagnose the problem.
+ if (SS && !SS->isEmpty())
+ return ExprError(Diag(Loc, diag::err_typecheck_no_member)
+ << Name << SS->getRange());
+ else if (Name.getNameKind() == DeclarationName::CXXOperatorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
+ return ExprError(Diag(Loc, diag::err_undeclared_use)
+ << Name.getAsString());
+ else
+ return ExprError(Diag(Loc, diag::err_undeclared_var_use) << Name);
+ }
+ }
+
+ // If this is an expression of the form &Class::member, don't build an
+ // implicit member ref, because we want a pointer to the member in general,
+ // not any specific instance's member.
+ if (isAddressOfOperand && SS && !SS->isEmpty() && !HasTrailingLParen) {
+ DeclContext *DC = computeDeclContext(*SS);
+ if (D && isa<CXXRecordDecl>(DC)) {
+ QualType DType;
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ DType = FD->getType().getNonReferenceType();
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ DType = Method->getType();
+ } else if (isa<OverloadedFunctionDecl>(D)) {
+ DType = Context.OverloadTy;
+ }
+ // Could be an inner type. That's diagnosed below, so ignore it here.
+ if (!DType.isNull()) {
+ // The pointer is type- and value-dependent if it points into something
+ // dependent.
+ bool Dependent = DC->isDependentContext();
+ return Owned(BuildDeclRefExpr(D, DType, Loc, Dependent, Dependent, SS));
+ }
+ }
+ }
+
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
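+ //
+ // For example (illustrative names), given
+ // 'struct S { union { int i; float f; }; };', the name 'i' found inside a
+ // member of S refers to a field of the unnamed union member.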
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(D))
+ if (cast<RecordDecl>(FD->getDeclContext())->isAnonymousStructOrUnion())
+ return BuildAnonymousStructUnionMemberReference(Loc, FD);
+
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) {
+ if (!MD->isStatic()) {
+ // C++ [class.mfct.nonstatic]p2:
+ // [...] if name lookup (3.4.1) resolves the name in the
+ // id-expression to a nonstatic nontype member of class X or of
+ // a base class of X, the id-expression is transformed into a
+ // class member access expression (5.2.5) using (*this) (9.3.2)
+ // as the postfix-expression to the left of the '.' operator.
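+ //
+ // For example, inside a non-static member function of
+ // 'struct S { int x; int get(); }' (illustrative), a bare reference to
+ // 'x' is rewritten as '(*this).x'.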
+ DeclContext *Ctx = 0;
+ QualType MemberType;
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ Ctx = FD->getDeclContext();
+ MemberType = FD->getType();
+
+ if (const ReferenceType *RefType = MemberType->getAsReferenceType())
+ MemberType = RefType->getPointeeType();
+ else if (!FD->isMutable()) {
+ unsigned combinedQualifiers
+ = MemberType.getCVRQualifiers() | MD->getTypeQualifiers();
+ MemberType = MemberType.getQualifiedType(combinedQualifiers);
+ }
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (!Method->isStatic()) {
+ Ctx = Method->getParent();
+ MemberType = Method->getType();
+ }
+ } else if (OverloadedFunctionDecl *Ovl
+ = dyn_cast<OverloadedFunctionDecl>(D)) {
+ for (OverloadedFunctionDecl::function_iterator
+ Func = Ovl->function_begin(),
+ FuncEnd = Ovl->function_end();
+ Func != FuncEnd; ++Func) {
+ if (CXXMethodDecl *DMethod = dyn_cast<CXXMethodDecl>(*Func))
+ if (!DMethod->isStatic()) {
+ Ctx = Ovl->getDeclContext();
+ MemberType = Context.OverloadTy;
+ break;
+ }
+ }
+ }
+
+ if (Ctx && Ctx->isRecord()) {
+ QualType CtxType = Context.getTagDeclType(cast<CXXRecordDecl>(Ctx));
+ QualType ThisType = Context.getTagDeclType(MD->getParent());
+ if ((Context.getCanonicalType(CtxType)
+ == Context.getCanonicalType(ThisType)) ||
+ IsDerivedFrom(ThisType, CtxType)) {
+ // Build the implicit member access expression.
+ Expr *This = new (Context) CXXThisExpr(SourceLocation(),
+ MD->getThisType(Context));
+ return Owned(new (Context) MemberExpr(This, true, D,
+ Loc, MemberType));
+ }
+ }
+ }
+ }
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) {
+ if (MD->isStatic())
+ // "invalid use of member 'x' in static member function"
+ return ExprError(Diag(Loc,diag::err_invalid_member_use_in_static_method)
+ << FD->getDeclName());
+ }
+
+ // Any other ways we could have found the field in a well-formed
+ // program would have been turned into implicit member expressions
+ // above.
+ return ExprError(Diag(Loc, diag::err_invalid_non_static_member_use)
+ << FD->getDeclName());
+ }
+
+ if (isa<TypedefDecl>(D))
+ return ExprError(Diag(Loc, diag::err_unexpected_typedef) << Name);
+ if (isa<ObjCInterfaceDecl>(D))
+ return ExprError(Diag(Loc, diag::err_unexpected_interface) << Name);
+ if (isa<NamespaceDecl>(D))
+ return ExprError(Diag(Loc, diag::err_unexpected_namespace) << Name);
+
+ // Make the DeclRefExpr or BlockDeclRefExpr for the decl.
+ if (OverloadedFunctionDecl *Ovl = dyn_cast<OverloadedFunctionDecl>(D))
+ return Owned(BuildDeclRefExpr(Ovl, Context.OverloadTy, Loc,
+ false, false, SS));
+ else if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D))
+ return Owned(BuildDeclRefExpr(Template, Context.OverloadTy, Loc,
+ false, false, SS));
+ ValueDecl *VD = cast<ValueDecl>(D);
+
+ // Check whether this declaration can be used. Note that we suppress
+ // this check when we're going to perform argument-dependent lookup
+ // on this function name, because this might not be the function
+ // that overload resolution actually selects.
+ if (!(ADL && isa<FunctionDecl>(VD)) && DiagnoseUseOfDecl(VD, Loc))
+ return ExprError();
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD)) {
+ // Warn about constructs like:
+ // if (void *X = foo()) { ... } else { X }.
+ // In the else block, the pointer is always false.
+
+ // FIXME: In a template instantiation, we don't have scope
+ // information to check this property.
+ if (Var->isDeclaredInCondition() && Var->getType()->isScalarType()) {
+ Scope *CheckS = S;
+ while (CheckS) {
+ if (CheckS->isWithinElse() &&
+ CheckS->getControlParent()->isDeclScope(DeclPtrTy::make(Var))) {
+ if (Var->getType()->isBooleanType())
+ ExprError(Diag(Loc, diag::warn_value_always_false)
+ << Var->getDeclName());
+ else
+ ExprError(Diag(Loc, diag::warn_value_always_zero)
+ << Var->getDeclName());
+ break;
+ }
+
+ // Move up one more control parent to check again.
+ CheckS = CheckS->getControlParent();
+ if (CheckS)
+ CheckS = CheckS->getParent();
+ }
+ }
+ } else if (FunctionDecl *Func = dyn_cast<FunctionDecl>(VD)) {
+ if (!getLangOptions().CPlusPlus && !Func->hasPrototype()) {
+ // C99 DR 316 says that, if a function type comes from a
+ // function definition (without a prototype), that type is only
+ // used for checking compatibility. Therefore, when referencing
+ // the function, we pretend that we don't have the full function
+ // type.
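+ //
+ // For example, given the K&R-style definition
+ // 'int f(x) int x; { return x; }', a later reference to 'f' is treated as
+ // having type 'int ()' rather than 'int (int)'.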
+ QualType T = Func->getType();
+ QualType NoProtoType = T;
+ if (const FunctionProtoType *Proto = T->getAsFunctionProtoType())
+ NoProtoType = Context.getFunctionNoProtoType(Proto->getResultType());
+ return Owned(BuildDeclRefExpr(VD, NoProtoType, Loc, false, false, SS));
+ }
+ }
+
+ // Only create DeclRefExprs for valid Decls.
+ if (VD->isInvalidDecl())
+ return ExprError();
+
+ // If the identifier reference is inside a block, and it refers to a value
+ // that is outside the block, create a BlockDeclRefExpr instead of a
+ // DeclRefExpr. This ensures the value is treated as a copy-in snapshot when
+ // the block is formed.
+ //
+ // We do not do this for things like enum constants, global variables, etc,
+ // as they do not get snapshotted.
+ //
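+ // For example, in '^{ return localVar; }' a non-__block local variable
+ // 'localVar' (illustrative) from the enclosing scope is captured as a
+ // const copy, while a global variable or an enum constant is referenced
+ // directly.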
+ if (CurBlock && ShouldSnapshotBlockValueReference(CurBlock, VD)) {
+ QualType ExprTy = VD->getType().getNonReferenceType();
+ // The BlocksAttr indicates the variable is bound by-reference.
+ if (VD->getAttr<BlocksAttr>())
+ return Owned(new (Context) BlockDeclRefExpr(VD, ExprTy, Loc, true));
+
+ // The variable will be bound by copy; make it const within the closure.
+ ExprTy.addConst();
+ return Owned(new (Context) BlockDeclRefExpr(VD, ExprTy, Loc, false));
+ }
+ // If this reference is not in a block or if the referenced variable is
+ // within the block, create a normal DeclRefExpr.
+
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ if (getLangOptions().CPlusPlus) {
+ // C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ // - an identifier that was declared with a dependent type,
+ if (VD->getType()->isDependentType())
+ TypeDependent = true;
+ // - FIXME: a template-id that is dependent,
+ // - a conversion-function-id that specifies a dependent type,
+ else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ Name.getCXXNameType()->isDependentType())
+ TypeDependent = true;
+ // - a nested-name-specifier that contains a class-name that
+ // names a dependent type.
+ else if (SS && !SS->isEmpty()) {
+ for (DeclContext *DC = computeDeclContext(*SS);
+ DC; DC = DC->getParent()) {
+ // FIXME: could stop early at namespace scope.
+ if (DC->isRecord()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ if (Context.getTypeDeclType(Record)->isDependentType()) {
+ TypeDependent = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // C++ [temp.dep.constexpr]p2:
+ //
+ // An identifier is value-dependent if it is:
+ // - a name declared with a dependent type,
+ if (TypeDependent)
+ ValueDependent = true;
+ // - the name of a non-type template parameter,
+ else if (isa<NonTypeTemplateParmDecl>(VD))
+ ValueDependent = true;
+ // - a constant with integral or enumeration type and is
+ // initialized with an expression that is value-dependent
+ // (FIXME!).
+ }
+
+ return Owned(BuildDeclRefExpr(VD, VD->getType().getNonReferenceType(), Loc,
+ TypeDependent, ValueDependent, SS));
+}
+
+Sema::OwningExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc,
+ tok::TokenKind Kind) {
+ PredefinedExpr::IdentType IT;
+
+ switch (Kind) {
+ default: assert(0 && "Unknown simple primary expr!");
+ case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
+ case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
+ }
+
+ // Pre-defined identifiers are of type char[x], where x is the length of the
+ // string.
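+ //
+ // For example, inside 'void foo()', __func__ behaves as an array of four
+ // const chars: "foo" plus the terminating NUL.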
+ unsigned Length;
+ if (FunctionDecl *FD = getCurFunctionDecl())
+ Length = FD->getIdentifier()->getLength();
+ else if (ObjCMethodDecl *MD = getCurMethodDecl())
+ Length = MD->getSynthesizedMethodSize();
+ else {
+ Diag(Loc, diag::ext_predef_outside_function);
+ // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
+ Length = IT == PredefinedExpr::PrettyFunction ? strlen("top level") : 0;
+ }
+
+ llvm::APInt LengthI(32, Length + 1);
+ QualType ResTy = Context.CharTy.getQualifiedType(QualType::Const);
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, 0);
+ return Owned(new (Context) PredefinedExpr(Loc, ResTy, IT));
+}
+
+Sema::OwningExprResult Sema::ActOnCharacterConstant(const Token &Tok) {
+ llvm::SmallString<16> CharBuffer;
+ CharBuffer.resize(Tok.getLength());
+ const char *ThisTokBegin = &CharBuffer[0];
+ unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin);
+
+ CharLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ Tok.getLocation(), PP);
+ if (Literal.hadError())
+ return ExprError();
+
+ QualType type = getLangOptions().CPlusPlus ? Context.CharTy : Context.IntTy;
+
+ return Owned(new (Context) CharacterLiteral(Literal.getValue(),
+ Literal.isWide(),
+ type, Tok.getLocation()));
+}
+
+Action::OwningExprResult Sema::ActOnNumericConstant(const Token &Tok) {
+ // Fast path for a single digit (which is quite common). A single digit
+ // cannot have a trigraph, escaped newline, radix prefix, or type suffix.
+ if (Tok.getLength() == 1) {
+ const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
+ unsigned IntSize = Context.Target.getIntWidth();
+ return Owned(new (Context) IntegerLiteral(llvm::APInt(IntSize, Val-'0'),
+ Context.IntTy, Tok.getLocation()));
+ }
+
+ llvm::SmallString<512> IntegerBuffer;
+ // Add padding so that NumericLiteralParser can overread by one character.
+ IntegerBuffer.resize(Tok.getLength()+1);
+ const char *ThisTokBegin = &IntegerBuffer[0];
+
+ // Get the spelling of the token, which eliminates trigraphs, etc.
+ unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin);
+
+ NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ Tok.getLocation(), PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ Expr *Res;
+
+ if (Literal.isFloatingLiteral()) {
+ QualType Ty;
+ if (Literal.isFloat)
+ Ty = Context.FloatTy;
+ else if (!Literal.isLong)
+ Ty = Context.DoubleTy;
+ else
+ Ty = Context.LongDoubleTy;
+
+ const llvm::fltSemantics &Format = Context.getFloatTypeSemantics(Ty);
+
+ // isExact will be set by GetFloatValue().
+ bool isExact = false;
+ Res = new (Context) FloatingLiteral(Literal.GetFloatValue(Format, &isExact),
+ &isExact, Ty, Tok.getLocation());
+
+ } else if (!Literal.isIntegerLiteral()) {
+ return ExprError();
+ } else {
+ QualType Ty;
+
+ // long long is a C99 feature.
+ if (!getLangOptions().C99 && !getLangOptions().CPlusPlus0x &&
+ Literal.isLongLong)
+ Diag(Tok.getLocation(), diag::ext_longlong);
+
+ // Get the value in the widest-possible width.
+ llvm::APInt ResultVal(Context.Target.getIntMaxTWidth(), 0);
+
+ if (Literal.GetIntegerValue(ResultVal)) {
+ // If this value didn't fit into uintmax_t, warn and force to ull.
+ Diag(Tok.getLocation(), diag::warn_integer_too_large);
+ Ty = Context.UnsignedLongLongTy;
+ assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() &&
+ "long long is not intmax_t?");
+ } else {
+ // If this value fits into a ULL, try to figure out what else it fits into
+ // according to the rules of C99 6.4.4.1p5.
+
+ // Octal, Hexadecimal, and integers with a U suffix are allowed to
+ // be an unsigned int.
+ bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10;
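+
+ // For example, assuming a 32-bit 'int', the decimal literal 2147483648
+ // skips both 'int' and 'unsigned int' and becomes 'long' (or 'long long'),
+ // whereas the hexadecimal literal 0x80000000 is allowed to become
+ // 'unsigned int'.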
+
+ // Check from smallest to largest, picking the smallest type we can.
+ unsigned Width = 0;
+ if (!Literal.isLong && !Literal.isLongLong) {
+ // Are int/unsigned possibilities?
+ unsigned IntSize = Context.Target.getIntWidth();
+
+ // Does it fit in an unsigned int?
+ if (ResultVal.isIntN(IntSize)) {
+ // Does it fit in a signed int?
+ if (!Literal.isUnsigned && ResultVal[IntSize-1] == 0)
+ Ty = Context.IntTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedIntTy;
+ Width = IntSize;
+ }
+ }
+
+ // Are long/unsigned long possibilities?
+ if (Ty.isNull() && !Literal.isLongLong) {
+ unsigned LongSize = Context.Target.getLongWidth();
+
+ // Does it fit in an unsigned long?
+ if (ResultVal.isIntN(LongSize)) {
+ // Does it fit in a signed long?
+ if (!Literal.isUnsigned && ResultVal[LongSize-1] == 0)
+ Ty = Context.LongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongTy;
+ Width = LongSize;
+ }
+ }
+
+ // Finally, check long long if needed.
+ if (Ty.isNull()) {
+ unsigned LongLongSize = Context.Target.getLongLongWidth();
+
+ // Does it fit in an unsigned long long?
+ if (ResultVal.isIntN(LongLongSize)) {
+ // Does it fit in a signed long long?
+ if (!Literal.isUnsigned && ResultVal[LongLongSize-1] == 0)
+ Ty = Context.LongLongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongLongTy;
+ Width = LongLongSize;
+ }
+ }
+
+ // If we still couldn't decide a type, we probably have something that
+ // does not fit in a signed long long, but has no U suffix.
+ if (Ty.isNull()) {
+ Diag(Tok.getLocation(), diag::warn_integer_too_large_for_signed);
+ Ty = Context.UnsignedLongLongTy;
+ Width = Context.Target.getLongLongWidth();
+ }
+
+ if (ResultVal.getBitWidth() != Width)
+ ResultVal.trunc(Width);
+ }
+ Res = new (Context) IntegerLiteral(ResultVal, Ty, Tok.getLocation());
+ }
+
+ // If this is an imaginary literal, create the ImaginaryLiteral wrapper.
+ if (Literal.isImaginary)
+ Res = new (Context) ImaginaryLiteral(Res,
+ Context.getComplexType(Res->getType()));
+
+ return Owned(Res);
+}
+
+Action::OwningExprResult Sema::ActOnParenExpr(SourceLocation L,
+ SourceLocation R, ExprArg Val) {
+ Expr *E = Val.takeAs<Expr>();
+ assert((E != 0) && "ActOnParenExpr() missing expr");
+ return Owned(new (Context) ParenExpr(L, R, E));
+}
+
+/// The UsualUnaryConversions() function is *not* called by this routine.
+/// See C99 6.3.2.1p[2-4] for more details.
+bool Sema::CheckSizeOfAlignOfOperand(QualType exprType,
+ SourceLocation OpLoc,
+ const SourceRange &ExprRange,
+ bool isSizeof) {
+ if (exprType->isDependentType())
+ return false;
+
+ // C99 6.5.3.4p1:
+ if (isa<FunctionType>(exprType)) {
+ // alignof(function) is allowed as an extension.
+ if (isSizeof)
+ Diag(OpLoc, diag::ext_sizeof_function_type) << ExprRange;
+ return false;
+ }
+
+ // Allow sizeof(void)/alignof(void) as an extension.
+ if (exprType->isVoidType()) {
+ Diag(OpLoc, diag::ext_sizeof_void_type)
+ << (isSizeof ? "sizeof" : "__alignof") << ExprRange;
+ return false;
+ }
+
+ if (RequireCompleteType(OpLoc, exprType,
+ isSizeof ? diag::err_sizeof_incomplete_type :
+ diag::err_alignof_incomplete_type,
+ ExprRange))
+ return true;
+
+ // Reject sizeof(interface) and sizeof(interface<proto>) in 64-bit mode.
+ if (LangOpts.ObjCNonFragileABI && exprType->isObjCInterfaceType()) {
+ Diag(OpLoc, diag::err_sizeof_nonfragile_interface)
+ << exprType << isSizeof << ExprRange;
+ return true;
+ }
+
+ return false;
+}
+
+bool Sema::CheckAlignOfExpr(Expr *E, SourceLocation OpLoc,
+ const SourceRange &ExprRange) {
+ E = E->IgnoreParens();
+
+ // alignof decl is always ok.
+ if (isa<DeclRefExpr>(E))
+ return false;
+
+ // Cannot know anything else if the expression is dependent.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->getBitField()) {
+ Diag(OpLoc, diag::err_sizeof_alignof_bitfield) << 1 << ExprRange;
+ return true;
+ }
+
+ // Alignment of a field access is always okay, so long as it isn't a
+ // bit-field.
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ if (dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ return false;
+
+ return CheckSizeOfAlignOfOperand(E->getType(), OpLoc, ExprRange, false);
+}
+
+/// \brief Build a sizeof or alignof expression given a type operand.
+Action::OwningExprResult
+Sema::CreateSizeOfAlignOfExpr(QualType T, SourceLocation OpLoc,
+ bool isSizeOf, SourceRange R) {
+ if (T.isNull())
+ return ExprError();
+
+ if (!T->isDependentType() &&
+ CheckSizeOfAlignOfOperand(T, OpLoc, R, isSizeOf))
+ return ExprError();
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return Owned(new (Context) SizeOfAlignOfExpr(isSizeOf, T,
+ Context.getSizeType(), OpLoc,
+ R.getEnd()));
+}
+
+/// \brief Build a sizeof or alignof expression given an expression
+/// operand.
+Action::OwningExprResult
+Sema::CreateSizeOfAlignOfExpr(Expr *E, SourceLocation OpLoc,
+ bool isSizeOf, SourceRange R) {
+ // Verify that the operand is valid.
+ bool isInvalid = false;
+ if (E->isTypeDependent()) {
+ // Delay type-checking for type-dependent expressions.
+ } else if (!isSizeOf) {
+ isInvalid = CheckAlignOfExpr(E, OpLoc, R);
+ } else if (E->getBitField()) { // C99 6.5.3.4p1.
+ Diag(OpLoc, diag::err_sizeof_alignof_bitfield) << 0;
+ isInvalid = true;
+ } else {
+ isInvalid = CheckSizeOfAlignOfOperand(E->getType(), OpLoc, R, true);
+ }
+
+ if (isInvalid)
+ return ExprError();
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return Owned(new (Context) SizeOfAlignOfExpr(isSizeOf, E,
+ Context.getSizeType(), OpLoc,
+ R.getEnd()));
+}
+
+/// ActOnSizeOfAlignOfExpr - Handle @c sizeof(type) and @c sizeof @c expr, and
+/// the same for @c alignof and @c __alignof.
+/// Note that the ArgRange is invalid if isType is false.
+Action::OwningExprResult
+Sema::ActOnSizeOfAlignOfExpr(SourceLocation OpLoc, bool isSizeof, bool isType,
+ void *TyOrEx, const SourceRange &ArgRange) {
+ // If error parsing type, ignore.
+ if (TyOrEx == 0) return ExprError();
+
+ if (isType) {
+ QualType ArgTy = QualType::getFromOpaquePtr(TyOrEx);
+ return CreateSizeOfAlignOfExpr(ArgTy, OpLoc, isSizeof, ArgRange);
+ }
+
+ // Get the end location.
+ Expr *ArgEx = (Expr *)TyOrEx;
+ Action::OwningExprResult Result
+ = CreateSizeOfAlignOfExpr(ArgEx, OpLoc, isSizeof, ArgEx->getSourceRange());
+
+ if (Result.isInvalid())
+ DeleteExpr(ArgEx);
+
+ return move(Result);
+}
+
+QualType Sema::CheckRealImagOperand(Expr *&V, SourceLocation Loc, bool isReal) {
+ if (V->isTypeDependent())
+ return Context.DependentTy;
+
+ // These operators return the element type of a complex type.
+ if (const ComplexType *CT = V->getType()->getAsComplexType())
+ return CT->getElementType();
+
+ // Otherwise they pass through real integer and floating point types here.
+ if (V->getType()->isArithmeticType())
+ return V->getType();
+
+ // Reject anything else.
+ Diag(Loc, diag::err_realimag_invalid_type) << V->getType()
+ << (isReal ? "__real" : "__imag");
+ return QualType();
+}
+
+
+Action::OwningExprResult
+Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind, ExprArg Input) {
+ Expr *Arg = (Expr *)Input.get();
+
+ UnaryOperator::Opcode Opc;
+ switch (Kind) {
+ default: assert(0 && "Unknown unary op!");
+ case tok::plusplus: Opc = UnaryOperator::PostInc; break;
+ case tok::minusminus: Opc = UnaryOperator::PostDec; break;
+ }
+
+ if (getLangOptions().CPlusPlus &&
+ (Arg->getType()->isRecordType() || Arg->getType()->isEnumeralType())) {
+ // Which overloaded operator?
+ OverloadedOperatorKind OverOp =
+ (Opc == UnaryOperator::PostInc)? OO_PlusPlus : OO_MinusMinus;
+
+ // C++ [over.inc]p1:
+ //
+ // [...] If the function is a member function with one
+ // parameter (which shall be of type int) or a non-member
+ // function with two parameters (the second of which shall be
+ // of type int), it defines the postfix increment operator ++
+ // for objects of that type. When the postfix increment is
+ // called as a result of using the ++ operator, the int
+ // argument will have value zero.
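+ //
+ // For example (illustrative), given 'struct S { S operator++(int); }; S s;',
+ // the expression 's++' is treated as a call to 's.operator++(0)', with the
+ // dummy 'int' argument constructed below.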
+ Expr *Args[2] = {
+ Arg,
+ new (Context) IntegerLiteral(llvm::APInt(Context.Target.getIntWidth(), 0,
+ /*isSigned=*/true), Context.IntTy, SourceLocation())
+ };
+
+ // Build the candidate set for overloading
+ OverloadCandidateSet CandidateSet;
+ AddOperatorCandidates(OverOp, S, OpLoc, Args, 2, CandidateSet);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (PerformObjectArgumentInitialization(Arg, Method))
+ return ExprError();
+ } else {
+ // Convert the arguments.
+ if (PerformCopyInitialization(Arg,
+ FnDecl->getParamDecl(0)->getType(),
+ "passing"))
+ return ExprError();
+ }
+
+ // Determine the result type
+ QualType ResultTy
+ = FnDecl->getType()->getAsFunctionType()->getResultType();
+ ResultTy = ResultTy.getNonReferenceType();
+
+ // Build the actual expression node.
+ Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
+ SourceLocation());
+ UsualUnaryConversions(FnExpr);
+
+ Input.release();
+ Args[0] = Arg;
+ return Owned(new (Context) CXXOperatorCallExpr(Context, OverOp, FnExpr,
+ Args, 2, ResultTy,
+ OpLoc));
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ if (PerformCopyInitialization(Arg, Best->BuiltinTypes.ParamTypes[0],
+ "passing"))
+ return ExprError();
+
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function:
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Arg->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Arg->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, fall through to trying to
+ // build a built-in operation.
+ }
+
+ QualType result = CheckIncrementDecrementOperand(Arg, OpLoc,
+ Opc == UnaryOperator::PostInc);
+ if (result.isNull())
+ return ExprError();
+ Input.release();
+ return Owned(new (Context) UnaryOperator(Arg, Opc, result, OpLoc));
+}
+
+Action::OwningExprResult
+Sema::ActOnArraySubscriptExpr(Scope *S, ExprArg Base, SourceLocation LLoc,
+ ExprArg Idx, SourceLocation RLoc) {
+ Expr *LHSExp = static_cast<Expr*>(Base.get()),
+ *RHSExp = static_cast<Expr*>(Idx.get());
+
+ if (getLangOptions().CPlusPlus &&
+ (LHSExp->isTypeDependent() || RHSExp->isTypeDependent())) {
+ Base.release();
+ Idx.release();
+ return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
+ Context.DependentTy, RLoc));
+ }
+
+ if (getLangOptions().CPlusPlus &&
+ (LHSExp->getType()->isRecordType() ||
+ LHSExp->getType()->isEnumeralType() ||
+ RHSExp->getType()->isRecordType() ||
+ RHSExp->getType()->isEnumeralType())) {
+ // Add the appropriate overloaded operators (C++ [over.match.oper])
+ // to the candidate set.
+ OverloadCandidateSet CandidateSet;
+ Expr *Args[2] = { LHSExp, RHSExp };
+ AddOperatorCandidates(OO_Subscript, S, LLoc, Args, 2, CandidateSet,
+ SourceRange(LLoc, RLoc));
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (PerformObjectArgumentInitialization(LHSExp, Method) ||
+ PerformCopyInitialization(RHSExp,
+ FnDecl->getParamDecl(0)->getType(),
+ "passing"))
+ return ExprError();
+ } else {
+ // Convert the arguments.
+ if (PerformCopyInitialization(LHSExp,
+ FnDecl->getParamDecl(0)->getType(),
+ "passing") ||
+ PerformCopyInitialization(RHSExp,
+ FnDecl->getParamDecl(1)->getType(),
+ "passing"))
+ return ExprError();
+ }
+
+ // Determine the result type
+ QualType ResultTy
+ = FnDecl->getType()->getAsFunctionType()->getResultType();
+ ResultTy = ResultTy.getNonReferenceType();
+
+ // Build the actual expression node.
+ Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
+ SourceLocation());
+ UsualUnaryConversions(FnExpr);
+
+ Base.release();
+ Idx.release();
+ Args[0] = LHSExp;
+ Args[1] = RHSExp;
+ return Owned(new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
+ FnExpr, Args, 2,
+ ResultTy, LLoc));
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ if (PerformCopyInitialization(LHSExp, Best->BuiltinTypes.ParamTypes[0],
+ "passing") ||
+ PerformCopyInitialization(RHSExp, Best->BuiltinTypes.ParamTypes[1],
+ "passing"))
+ return ExprError();
+
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function:
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(LLoc, diag::err_ovl_ambiguous_oper)
+ << "[]"
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(LLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << "[]"
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, fall through to trying to
+ // build a built-in operation.
+ }
+
+ // Perform default conversions.
+ DefaultFunctionArrayConversion(LHSExp);
+ DefaultFunctionArrayConversion(RHSExp);
+
+ QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType();
+
+ // C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent
+ // to the expression *((e1)+(e2)). This means the array "Base" may actually be
+ // in the subscript position. As a result, we need to derive the array base
+ // and index from the expression types.
+ Expr *BaseExpr, *IndexExpr;
+ QualType ResultType;
+ if (LHSTy->isDependentType() || RHSTy->isDependentType()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = Context.DependentTy;
+ } else if (const PointerType *PTy = LHSTy->getAsPointerType()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const PointerType *PTy = RHSTy->getAsPointerType()) {
+ // Handle the uncommon case of "123[Ptr]".
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const VectorType *VTy = LHSTy->getAsVectorType()) {
+ BaseExpr = LHSExp; // vectors: V[123]
+ IndexExpr = RHSExp;
+
+ // FIXME: need to deal with const...
+ ResultType = VTy->getElementType();
+ } else if (LHSTy->isArrayType()) {
+ // If we see an array that wasn't promoted by
+ // DefaultFunctionArrayConversion, it must be because of the C90 rule
+ // that doesn't allow promoting non-lvalue arrays. Warn, then force the
+ // promotion here.
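+ //
+ // For example, in C90 the expression 'f().arr[0]' (illustrative), where
+ // 'f' returns a struct containing an array member 'arr', involves a
+ // non-lvalue array that the usual conversions would not decay to a
+ // pointer; we decay it here as an extension.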
+ Diag(LHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ LHSExp->getSourceRange();
+ ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy));
+ LHSTy = LHSExp->getType();
+
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = LHSTy->getAsPointerType()->getPointeeType();
+ } else if (RHSTy->isArrayType()) {
+ // Same as the previous case, but for the 123[f().a] form.
+ Diag(RHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ RHSExp->getSourceRange();
+ ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy));
+ RHSTy = RHSExp->getType();
+
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = RHSTy->getAsPointerType()->getPointeeType();
+ } else {
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_value)
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent())
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_not_integer)
+ << IndexExpr->getSourceRange());
+
+ // C99 6.5.2.1p1: "shall have type 'pointer to *object* type'". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultType->isFunctionType()) {
+ Diag(BaseExpr->getLocStart(), diag::err_subscript_function_type)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+
+ if (!ResultType->isDependentType() &&
+ RequireCompleteType(LLoc, ResultType, diag::err_subscript_incomplete_type,
+ BaseExpr->getSourceRange()))
+ return ExprError();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (ResultType->isObjCInterfaceType() && LangOpts.ObjCNonFragileABI) {
+ Diag(LLoc, diag::err_subscript_nonfragile_interface)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+
+ Base.release();
+ Idx.release();
+ return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
+ ResultType, RLoc));
+}
+
+QualType Sema::
+CheckExtVectorComponent(QualType baseType, SourceLocation OpLoc,
+ IdentifierInfo &CompName, SourceLocation CompLoc) {
+ const ExtVectorType *vecType = baseType->getAsExtVectorType();
+
+ // The vector accessor can't exceed the number of elements.
+ const char *compStr = CompName.getName();
+
+ // This flag determines whether or not the component is one of the four
+ // special names that select exactly half of the elements.
+ bool HalvingSwizzle = false;
+
+ // This flag determines whether or not CompName has an 's' char prefix,
+ // indicating that it is a string of hex values to be used as vector indices.
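+ //
+ // For example, 'V.xy' uses the point accessors, while 'V.s01' uses the
+ // hex form to select elements 0 and 1.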
+ bool HexSwizzle = *compStr == 's';
+
+ // Check that we've found one of the special components, or that the
+ // component names all come from the same set.
+ if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
+ !strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
+ HalvingSwizzle = true;
+ } else if (vecType->getPointAccessorIdx(*compStr) != -1) {
+ do
+ compStr++;
+ while (*compStr && vecType->getPointAccessorIdx(*compStr) != -1);
+ } else if (HexSwizzle || vecType->getNumericAccessorIdx(*compStr) != -1) {
+ do
+ compStr++;
+ while (*compStr && vecType->getNumericAccessorIdx(*compStr) != -1);
+ }
+
+ if (!HalvingSwizzle && *compStr) {
+ // We didn't get to the end of the string. This means the component names
+ // didn't come from the same set *or* we encountered an illegal name.
+ Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
+ << std::string(compStr,compStr+1) << SourceRange(CompLoc);
+ return QualType();
+ }
+
+ // Ensure no component accessor exceeds the width of the vector type it
+ // operates on.
+ if (!HalvingSwizzle) {
+ compStr = CompName.getName();
+
+ if (HexSwizzle)
+ compStr++;
+
+ while (*compStr) {
+ if (!vecType->isAccessorWithinNumElements(*compStr++)) {
+ Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
+ << baseType << SourceRange(CompLoc);
+ return QualType();
+ }
+ }
+ }
+
+ // If this is a halving swizzle, verify that the base type has an even
+ // number of elements.
+ if (HalvingSwizzle && (vecType->getNumElements() & 1U)) {
+ Diag(OpLoc, diag::err_ext_vector_component_requires_even)
+ << baseType << SourceRange(CompLoc);
+ return QualType();
+ }
+
+ // The component accessor looks fine - now we need to compute the actual type.
+ // The vector type is implied by the component accessor. For example,
+ // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc.
+ // vec4.s0 is a float, vec4.s23 is a vec3, etc.
+ // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2.
+ unsigned CompSize = HalvingSwizzle ? vecType->getNumElements() / 2
+ : CompName.getLength();
+ if (HexSwizzle)
+ CompSize--;
+
+ if (CompSize == 1)
+ return vecType->getElementType();
+
+ QualType VT = Context.getExtVectorType(vecType->getElementType(), CompSize);
+ // Now look up the TypedefDecl from the vector type. Without this,
+ // diagnostics look bad. We want extended vector types to appear built-in.
+ for (unsigned i = 0, E = ExtVectorDecls.size(); i != E; ++i) {
+ if (ExtVectorDecls[i]->getUnderlyingType() == VT)
+ return Context.getTypedefType(ExtVectorDecls[i]);
+ }
+ return VT; // should never get here (a typedef type should always be found).
+}
+
+static Decl *FindGetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
+ IdentifierInfo &Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+
+ if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Context, &Member))
+ return PD;
+ if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Context, Sel))
+ return OMD;
+
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I) {
+ if (Decl *D = FindGetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context))
+ return D;
+ }
+ return 0;
+}
+
+static Decl *FindGetterNameDecl(const ObjCQualifiedIdType *QIdTy,
+ IdentifierInfo &Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ // Check protocols on qualified interfaces.
+ Decl *GDecl = 0;
+ for (ObjCQualifiedIdType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Context, &Member)) {
+ GDecl = PD;
+ break;
+ }
+ // Also look for a getter method whose name uses the property syntax.
+ if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Context, Sel)) {
+ GDecl = OMD;
+ break;
+ }
+ }
+ if (!GDecl) {
+ for (ObjCQualifiedIdType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ // Search in the protocol-qualifier list of current protocol.
+ GDecl = FindGetterNameDeclFromProtocolList(*I, Member, Sel, Context);
+ if (GDecl)
+ return GDecl;
+ }
+ }
+ return GDecl;
+}
+
+/// FindMethodInNestedImplementations - Look up a method in the current
+/// implementation and in all base class implementations.
+///
+ObjCMethodDecl *Sema::FindMethodInNestedImplementations(
+ const ObjCInterfaceDecl *IFace,
+ const Selector &Sel) {
+ ObjCMethodDecl *Method = 0;
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(IFace->getIdentifier()))
+ Method = ImpDecl->getInstanceMethod(Context, Sel);
+
+ if (!Method && IFace->getSuperClass())
+ return FindMethodInNestedImplementations(IFace->getSuperClass(), Sel);
+ return Method;
+}
+
+Action::OwningExprResult
+Sema::ActOnMemberReferenceExpr(Scope *S, ExprArg Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind, SourceLocation MemberLoc,
+ IdentifierInfo &Member,
+ DeclPtrTy ObjCImpDecl) {
+ Expr *BaseExpr = Base.takeAs<Expr>();
+ assert(BaseExpr && "no record expression");
+
+ // Perform default conversions.
+ DefaultFunctionArrayConversion(BaseExpr);
+
+ QualType BaseType = BaseExpr->getType();
+ assert(!BaseType.isNull() && "no type for member expression");
+
+ // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
+ // must have pointer type, and the accessed type is the pointee.
+ if (OpKind == tok::arrow) {
+ if (BaseType->isDependentType())
+ return Owned(new (Context) CXXUnresolvedMemberExpr(Context,
+ BaseExpr, true,
+ OpLoc,
+ DeclarationName(&Member),
+ MemberLoc));
+ else if (const PointerType *PT = BaseType->getAsPointerType())
+ BaseType = PT->getPointeeType();
+ else if (getLangOptions().CPlusPlus && BaseType->isRecordType())
+ return Owned(BuildOverloadedArrowExpr(S, BaseExpr, OpLoc,
+ MemberLoc, Member));
+ else
+ return ExprError(Diag(MemberLoc,
+ diag::err_typecheck_member_reference_arrow)
+ << BaseType << BaseExpr->getSourceRange());
+ } else {
+ if (BaseType->isDependentType()) {
+ // Require that the base type isn't a pointer type, so that we report
+ // an error for code like:
+ // T* t;
+ // t.f;
+ //
+ // In Obj-C++, however, the above expression is valid, since it could be
+ // accessing the 'f' property if T is an Obj-C interface. The extra check
+ // allows this, while still reporting an error if T is a struct pointer.
+ const PointerType *PT = BaseType->getAsPointerType();
+
+ if (!PT || (getLangOptions().ObjC1 &&
+ !PT->getPointeeType()->isRecordType()))
+ return Owned(new (Context) CXXUnresolvedMemberExpr(Context,
+ BaseExpr, false,
+ OpLoc,
+ DeclarationName(&Member),
+ MemberLoc));
+ }
+ }
+
+ // Handle field access to simple records. This also handles access to fields
+ // of the ObjC 'id' struct.
+ if (const RecordType *RTy = BaseType->getAsRecordType()) {
+ RecordDecl *RDecl = RTy->getDecl();
+ if (RequireCompleteType(OpLoc, BaseType,
+ diag::err_typecheck_incomplete_tag,
+ BaseExpr->getSourceRange()))
+ return ExprError();
+
+ // The record definition is complete, now make sure the member is valid.
+ // FIXME: Qualified name lookup for C++ is a bit more complicated than this.
+ LookupResult Result
+ = LookupQualifiedName(RDecl, DeclarationName(&Member),
+ LookupMemberName, false);
+
+ if (!Result)
+ return ExprError(Diag(MemberLoc, diag::err_typecheck_no_member)
+ << &Member << BaseExpr->getSourceRange());
+ if (Result.isAmbiguous()) {
+ DiagnoseAmbiguousLookup(Result, DeclarationName(&Member),
+ MemberLoc, BaseExpr->getSourceRange());
+ return ExprError();
+ }
+
+ NamedDecl *MemberDecl = Result;
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (MemberDecl->isInvalidDecl())
+ return ExprError();
+
+ // Check the use of this field
+ if (DiagnoseUseOfDecl(MemberDecl, MemberLoc))
+ return ExprError();
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ if (cast<RecordDecl>(FD->getDeclContext())->isAnonymousStructOrUnion())
+ return BuildAnonymousStructUnionMemberReference(MemberLoc, FD,
+ BaseExpr, OpLoc);
+
+ // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
+ // FIXME: Handle address space modifiers
+ QualType MemberType = FD->getType();
+ if (const ReferenceType *Ref = MemberType->getAsReferenceType())
+ MemberType = Ref->getPointeeType();
+ else {
+ unsigned combinedQualifiers =
+ MemberType.getCVRQualifiers() | BaseType.getCVRQualifiers();
+ if (FD->isMutable())
+ combinedQualifiers &= ~QualType::Const;
+ MemberType = MemberType.getQualifiedType(combinedQualifiers);
+ }
+
+ return Owned(new (Context) MemberExpr(BaseExpr, OpKind == tok::arrow, FD,
+ MemberLoc, MemberType));
+ }
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl))
+ return Owned(new (Context) MemberExpr(BaseExpr, OpKind == tok::arrow,
+ Var, MemberLoc,
+ Var->getType().getNonReferenceType()));
+ if (FunctionDecl *MemberFn = dyn_cast<FunctionDecl>(MemberDecl))
+ return Owned(new (Context) MemberExpr(BaseExpr, OpKind == tok::arrow,
+ MemberFn, MemberLoc,
+ MemberFn->getType()));
+ if (OverloadedFunctionDecl *Ovl
+ = dyn_cast<OverloadedFunctionDecl>(MemberDecl))
+ return Owned(new (Context) MemberExpr(BaseExpr, OpKind == tok::arrow, Ovl,
+ MemberLoc, Context.OverloadTy));
+ if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl))
+ return Owned(new (Context) MemberExpr(BaseExpr, OpKind == tok::arrow,
+ Enum, MemberLoc, Enum->getType()));
+ if (isa<TypeDecl>(MemberDecl))
+ return ExprError(Diag(MemberLoc,diag::err_typecheck_member_reference_type)
+ << DeclarationName(&Member) << int(OpKind == tok::arrow));
+
+ // We found a declaration kind that we didn't expect. This is a
+ // generic error message that tells the user that she can't refer
+ // to this member with '.' or '->'.
+ return ExprError(Diag(MemberLoc,
+ diag::err_typecheck_member_reference_unknown)
+ << DeclarationName(&Member) << int(OpKind == tok::arrow));
+ }
+
+ // Handle access to Objective-C instance variables, such as "Obj->ivar" and
+ // (*Obj).ivar.
+ if (const ObjCInterfaceType *IFTy = BaseType->getAsObjCInterfaceType()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFTy->getDecl()->lookupInstanceVariable(Context,
+ &Member,
+ ClassDeclared)) {
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check whether we can reference this field.
+ if (DiagnoseUseOfDecl(IV, MemberLoc))
+ return ExprError();
+ if (IV->getAccessControl() != ObjCIvarDecl::Public &&
+ IV->getAccessControl() != ObjCIvarDecl::Package) {
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (ObjCImpDecl && getCurFunctionDecl()) {
+ // Case of a c-function declared inside an objc implementation.
+ // FIXME: For a c-style function nested inside an objc implementation
+ // class, there is no implementation context available, so we pass
+ // down the context as an argument to this routine. Ideally, this
+ // context would be stored in (or derivable from) the AST for the
+ // function decl itself.
+ Decl *ImplDecl = ObjCImpDecl.getAs<Decl>();
+ if (ObjCImplementationDecl *IMPD =
+ dyn_cast<ObjCImplementationDecl>(ImplDecl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ImplDecl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (ClassDeclared != IFTy->getDecl() ||
+ ClassOfMethodDecl != ClassDeclared)
+ Diag(MemberLoc, diag::error_private_ivar_access) << IV->getDeclName();
+ }
+ // @protected
+ else if (!IFTy->getDecl()->isSuperClassOf(ClassOfMethodDecl))
+ Diag(MemberLoc, diag::error_protected_ivar_access) << IV->getDeclName();
+ }
+
+ return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ MemberLoc, BaseExpr,
+ OpKind == tok::arrow));
+ }
+ return ExprError(Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
+ << IFTy->getDecl()->getDeclName() << &Member
+ << BaseExpr->getSourceRange());
+ }
+
+ // Handle Objective-C property access, which is "Obj.property" where Obj is a
+ // pointer to a (potentially qualified) interface type.
+ const PointerType *PTy;
+ const ObjCInterfaceType *IFTy;
+ if (OpKind == tok::period && (PTy = BaseType->getAsPointerType()) &&
+ (IFTy = PTy->getPointeeType()->getAsObjCInterfaceType())) {
+ ObjCInterfaceDecl *IFace = IFTy->getDecl();
+
+ // Search for a declared property first.
+ if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Context,
+ &Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+ QualType ResTy = PD->getType();
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&Member);
+ ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Context, Sel);
+ if (DiagnosePropertyAccessorMismatch(PD, Getter, MemberLoc))
+ ResTy = Getter->getResultType();
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy,
+ MemberLoc, BaseExpr));
+ }
+
+ // Check protocols on qualified interfaces.
+ for (ObjCInterfaceType::qual_iterator I = IFTy->qual_begin(),
+ E = IFTy->qual_end(); I != E; ++I)
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Context,
+ &Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
+ MemberLoc, BaseExpr));
+ }
+
+ // If that failed, look for an "implicit" property by seeing if the nullary
+ // selector is implemented.
+
+ // FIXME: The logic for looking up nullary and unary selectors should be
+ // shared with the code in ActOnInstanceMessage.
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&Member);
+ ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Context, Sel);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ Getter = FindMethodInNestedImplementations(IFace, Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Getter) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Getter; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == IFace)
+ Getter = ObjCCategoryImpls[i]->getInstanceMethod(Context, Sel);
+ }
+ }
+ if (Getter) {
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ }
+ // If we found a getter, then this may be a valid dot-reference; we
+ // will look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), &Member);
+ ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(Context, SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = FindMethodInNestedImplementations(IFace, SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Setter; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == IFace)
+ Setter = ObjCCategoryImpls[i]->getInstanceMethod(Context, SetterSel);
+ }
+ }
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ QualType PType;
+
+ if (Getter)
+ PType = Getter->getResultType();
+ else {
+ for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
+ E = Setter->param_end(); PI != E; ++PI)
+ PType = (*PI)->getType();
+ }
+ // FIXME: we must check that the setter has property type.
+ return Owned(new (Context) ObjCKVCRefExpr(Getter, PType,
+ Setter, MemberLoc, BaseExpr));
+ }
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << &Member << BaseType);
+ }
+ // Handle properties on qualified "id" protocols.
+ const ObjCQualifiedIdType *QIdTy;
+ if (OpKind == tok::period && (QIdTy = BaseType->getAsObjCQualifiedIdType())) {
+ // Check protocols on qualified interfaces.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&Member);
+ if (Decl *PMDecl = FindGetterNameDecl(QIdTy, Member, Sel, Context)) {
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
+ // Check the use of this declaration
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, PD->getType(),
+ MemberLoc, BaseExpr));
+ }
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(OMD, MemberLoc))
+ return ExprError();
+
+ return Owned(new (Context) ObjCMessageExpr(BaseExpr, Sel,
+ OMD->getResultType(),
+ OMD, OpLoc, MemberLoc,
+ NULL, 0));
+ }
+ }
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << &Member << BaseType);
+ }
+ // Handle properties on ObjC 'Class' types.
+ if (OpKind == tok::period && (BaseType == Context.getObjCClassType())) {
+ // Also look for a getter method whose name uses the property syntax.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&Member);
+ if (ObjCMethodDecl *MD = getCurMethodDecl()) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ ObjCMethodDecl *Getter;
+ // FIXME: need to also look locally in the implementation.
+ if ((Getter = IFace->lookupClassMethod(Context, Sel))) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ }
+ // If we found a getter, then this may be a valid dot-reference; we
+ // will look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), &Member);
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(Context, SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = FindMethodInNestedImplementations(IFace, SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Setter; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == IFace)
+ Setter = ObjCCategoryImpls[i]->getClassMethod(Context, SetterSel);
+ }
+ }
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ QualType PType;
+
+ if (Getter)
+ PType = Getter->getResultType();
+ else {
+ for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
+ E = Setter->param_end(); PI != E; ++PI)
+ PType = (*PI)->getType();
+ }
+ // FIXME: we must check that the setter has property type.
+ return Owned(new (Context) ObjCKVCRefExpr(Getter, PType,
+ Setter, MemberLoc, BaseExpr));
+ }
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << &Member << BaseType);
+ }
+ }
+
+ // Handle 'field access' to vectors, such as 'V.xx'.
+ if (BaseType->isExtVectorType()) {
+ QualType ret = CheckExtVectorComponent(BaseType, OpLoc, Member, MemberLoc);
+ if (ret.isNull())
+ return ExprError();
+ return Owned(new (Context) ExtVectorElementExpr(ret, BaseExpr, Member,
+ MemberLoc));
+ }
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr->getSourceRange();
+
+ // If the user is trying to apply -> or . to a function or function
+ // pointer, it's probably because they forgot parentheses to call
+ // the function. Suggest the addition of those parentheses.
+ if (BaseType == Context.OverloadTy ||
+ BaseType->isFunctionType() ||
+ (BaseType->isPointerType() &&
+ BaseType->getAsPointerType()->isFunctionType())) {
+ SourceLocation Loc = PP.getLocForEndOfToken(BaseExpr->getLocEnd());
+ Diag(Loc, diag::note_member_reference_needs_call)
+ << CodeModificationHint::CreateInsertion(Loc, "()");
+ }
+
+ return ExprError();
+}
+
+/// ConvertArgumentsForCall - Converts the arguments specified in
+/// Args/NumArgs to the parameter types of the function FDecl with
+/// function prototype Proto. Call is the call expression itself, and
+/// Fn is the function expression. For a C++ member function, this
+/// routine does not attempt to convert the object argument. Returns
+/// true if the call is ill-formed.
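+///
+/// For example (illustrative), given 'void f(int, double);', a call 'f(1)'
+/// is rejected as having too few arguments, while 'f(1, 2)' converts the
+/// second argument from 'int' to 'double' as if by assignment.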
+bool
+Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc) {
+ // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
+ // assignment, to the types of the corresponding parameter, ...
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ unsigned NumArgsToCheck = NumArgs;
+ bool Invalid = false;
+
+ // If too few arguments are available (and we don't have default
+ // arguments for the remaining parameters), don't make the call.
+ if (NumArgs < NumArgsInProto) {
+ if (!FDecl || NumArgs < FDecl->getMinRequiredArguments())
+ return Diag(RParenLoc, diag::err_typecheck_call_too_few_args)
+ << Fn->getType()->isBlockPointerType() << Fn->getSourceRange();
+ // Use default arguments for missing arguments
+ NumArgsToCheck = NumArgsInProto;
+ Call->setNumArgs(Context, NumArgsInProto);
+ }
+
+ // If too many are passed and not variadic, error on the extras and drop
+ // them.
+ if (NumArgs > NumArgsInProto) {
+ if (!Proto->isVariadic()) {
+ Diag(Args[NumArgsInProto]->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << Fn->getType()->isBlockPointerType() << Fn->getSourceRange()
+ << SourceRange(Args[NumArgsInProto]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
+ // This deletes the extra arguments.
+ Call->setNumArgs(Context, NumArgsInProto);
+ Invalid = true;
+ }
+ NumArgsToCheck = NumArgsInProto;
+ }
+
+ // Continue to check argument types (even if we have too few/many args).
+ for (unsigned i = 0; i != NumArgsToCheck; i++) {
+ QualType ProtoArgType = Proto->getArgType(i);
+
+ Expr *Arg;
+ if (i < NumArgs) {
+ Arg = Args[i];
+
+ if (RequireCompleteType(Arg->getSourceRange().getBegin(),
+ ProtoArgType,
+ diag::err_call_incomplete_argument,
+ Arg->getSourceRange()))
+ return true;
+
+ // Pass the argument.
+ if (PerformCopyInitialization(Arg, ProtoArgType, "passing"))
+ return true;
+ } else
+      // We already type-checked the default argument, so we know it works.
+ Arg = new (Context) CXXDefaultArgExpr(FDecl->getParamDecl(i));
+ QualType ArgType = Arg->getType();
+
+ Call->setArg(i, Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (Proto->isVariadic()) {
+ VariadicCallType CallType = VariadicFunction;
+ if (Fn->getType()->isBlockPointerType())
+ CallType = VariadicBlock; // Block
+ else if (isa<MemberExpr>(Fn))
+ CallType = VariadicMethod;
+
+ // Promote the arguments (C99 6.5.2.2p7).
+ for (unsigned i = NumArgsInProto; i != NumArgs; i++) {
+ Expr *Arg = Args[i];
+ Invalid |= DefaultVariadicArgumentPromotion(Arg, CallType);
+ Call->setArg(i, Arg);
+ }
+ }
+
+ return Invalid;
+}
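+
+// An illustrative (hypothetical) caller of the checks above, assuming the
+// prototype 'void f(int, char *)':
+//   f(1);          // error: too few arguments to function call
+//   f(1, "x", 2);  // error: too many arguments; the extras are dropped
+//   f(1, 0);       // ok: 0 is a null pointer constant, converted as if by
+//                  //     assignment to 'char *' (C99 6.5.2.2p7)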
+
+/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+/// This provides the location of the left/right parens and a list of comma
+/// locations.
+Action::OwningExprResult
+Sema::ActOnCallExpr(Scope *S, ExprArg fn, SourceLocation LParenLoc,
+ MultiExprArg args,
+ SourceLocation *CommaLocs, SourceLocation RParenLoc) {
+ unsigned NumArgs = args.size();
+ Expr *Fn = fn.takeAs<Expr>();
+ Expr **Args = reinterpret_cast<Expr**>(args.release());
+ assert(Fn && "no function call expression");
+ FunctionDecl *FDecl = NULL;
+ NamedDecl *NDecl = NULL;
+ DeclarationName UnqualifiedName;
+
+ if (getLangOptions().CPlusPlus) {
+ // Determine whether this is a dependent call inside a C++ template,
+ // in which case we won't do any semantic analysis now.
+ // FIXME: Will need to cache the results of name lookup (including ADL) in
+ // Fn.
+ bool Dependent = false;
+ if (Fn->isTypeDependent())
+ Dependent = true;
+ else if (Expr::hasAnyTypeDependentArguments(Args, NumArgs))
+ Dependent = true;
+
+ if (Dependent)
+ return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs,
+ Context.DependentTy, RParenLoc));
+
+ // Determine whether this is a call to an object (C++ [over.call.object]).
+ if (Fn->getType()->isRecordType())
+ return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, Args, NumArgs,
+ CommaLocs, RParenLoc));
+
+ // Determine whether this is a call to a member function.
+ if (MemberExpr *MemExpr = dyn_cast<MemberExpr>(Fn->IgnoreParens()))
+ if (isa<OverloadedFunctionDecl>(MemExpr->getMemberDecl()) ||
+ isa<CXXMethodDecl>(MemExpr->getMemberDecl()))
+ return Owned(BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
+ CommaLocs, RParenLoc));
+ }
+
+ // If we're directly calling a function, get the appropriate declaration.
+ DeclRefExpr *DRExpr = NULL;
+ Expr *FnExpr = Fn;
+ bool ADL = true;
+ while (true) {
+ if (ImplicitCastExpr *IcExpr = dyn_cast<ImplicitCastExpr>(FnExpr))
+ FnExpr = IcExpr->getSubExpr();
+ else if (ParenExpr *PExpr = dyn_cast<ParenExpr>(FnExpr)) {
+ // Parentheses around a function disable ADL
+ // (C++0x [basic.lookup.argdep]p1).
+ ADL = false;
+ FnExpr = PExpr->getSubExpr();
+ } else if (isa<UnaryOperator>(FnExpr) &&
+ cast<UnaryOperator>(FnExpr)->getOpcode()
+ == UnaryOperator::AddrOf) {
+ FnExpr = cast<UnaryOperator>(FnExpr)->getSubExpr();
+ } else if ((DRExpr = dyn_cast<DeclRefExpr>(FnExpr))) {
+ // Qualified names disable ADL (C++0x [basic.lookup.argdep]p1).
+ ADL &= !isa<QualifiedDeclRefExpr>(DRExpr);
+ break;
+ } else if (UnresolvedFunctionNameExpr *DepName
+ = dyn_cast<UnresolvedFunctionNameExpr>(FnExpr)) {
+ UnqualifiedName = DepName->getName();
+ break;
+ } else {
+ // Any kind of name that does not refer to a declaration (or
+ // set of declarations) disables ADL (C++0x [basic.lookup.argdep]p3).
+ ADL = false;
+ break;
+ }
+ }
+
+ OverloadedFunctionDecl *Ovl = 0;
+ if (DRExpr) {
+ FDecl = dyn_cast<FunctionDecl>(DRExpr->getDecl());
+ Ovl = dyn_cast<OverloadedFunctionDecl>(DRExpr->getDecl());
+ NDecl = dyn_cast<NamedDecl>(DRExpr->getDecl());
+ }
+
+ if (Ovl || (getLangOptions().CPlusPlus && (FDecl || UnqualifiedName))) {
+ // We don't perform ADL for implicit declarations of builtins.
+ if (FDecl && FDecl->getBuiltinID(Context) && FDecl->isImplicit())
+ ADL = false;
+
+ // We don't perform ADL in C.
+ if (!getLangOptions().CPlusPlus)
+ ADL = false;
+
+ if (Ovl || ADL) {
+ FDecl = ResolveOverloadedCallFn(Fn, DRExpr? DRExpr->getDecl() : 0,
+ UnqualifiedName, LParenLoc, Args,
+ NumArgs, CommaLocs, RParenLoc, ADL);
+ if (!FDecl)
+ return ExprError();
+
+ // Update Fn to refer to the actual function selected.
+ Expr *NewFn = 0;
+ if (QualifiedDeclRefExpr *QDRExpr
+ = dyn_cast_or_null<QualifiedDeclRefExpr>(DRExpr))
+ NewFn = new (Context) QualifiedDeclRefExpr(FDecl, FDecl->getType(),
+ QDRExpr->getLocation(),
+ false, false,
+ QDRExpr->getQualifierRange(),
+ QDRExpr->getQualifier());
+ else
+ NewFn = new (Context) DeclRefExpr(FDecl, FDecl->getType(),
+ Fn->getSourceRange().getBegin());
+ Fn->Destroy(Context);
+ Fn = NewFn;
+ }
+ }
+
+ // Promote the function operand.
+ UsualUnaryConversions(Fn);
+
+ // Make the call expr early, before semantic checks. This guarantees cleanup
+ // of arguments and function on error.
+ ExprOwningPtr<CallExpr> TheCall(this, new (Context) CallExpr(Context, Fn,
+ Args, NumArgs,
+ Context.BoolTy,
+ RParenLoc));
+
+ const FunctionType *FuncT;
+ if (!Fn->getType()->isBlockPointerType()) {
+ // C99 6.5.2.2p1 - "The expression that denotes the called function shall
+ // have type pointer to function".
+ const PointerType *PT = Fn->getType()->getAsPointerType();
+ if (PT == 0)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ FuncT = PT->getPointeeType()->getAsFunctionType();
+ } else { // This is a block call.
+ FuncT = Fn->getType()->getAsBlockPointerType()->getPointeeType()->
+ getAsFunctionType();
+ }
+ if (FuncT == 0)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+
+ // Check for a valid return type
+ if (!FuncT->getResultType()->isVoidType() &&
+ RequireCompleteType(Fn->getSourceRange().getBegin(),
+ FuncT->getResultType(),
+ diag::err_call_incomplete_return,
+ TheCall->getSourceRange()))
+ return ExprError();
+
+ // We know the result type of the call, set it.
+ TheCall->setType(FuncT->getResultType().getNonReferenceType());
+
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) {
+ if (ConvertArgumentsForCall(&*TheCall, Fn, FDecl, Proto, Args, NumArgs,
+ RParenLoc))
+ return ExprError();
+ } else {
+ assert(isa<FunctionNoProtoType>(FuncT) && "Unknown FunctionType!");
+
+ if (FDecl) {
+      // Check if we have too few/too many arguments, based on our
+      // knowledge of the function definition.
+ const FunctionDecl *Def = 0;
+ if (FDecl->getBody(Context, Def) && NumArgs != Def->param_size()) {
+ const FunctionProtoType *Proto =
+ Def->getType()->getAsFunctionProtoType();
+ if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size())) {
+ Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments)
+ << (NumArgs > Def->param_size()) << FDecl << Fn->getSourceRange();
+ }
+ }
+ }
+
+ // Promote the arguments (C99 6.5.2.2p6).
+ for (unsigned i = 0; i != NumArgs; i++) {
+ Expr *Arg = Args[i];
+ DefaultArgumentPromotion(Arg);
+ if (RequireCompleteType(Arg->getSourceRange().getBegin(),
+ Arg->getType(),
+ diag::err_call_incomplete_argument,
+ Arg->getSourceRange()))
+ return ExprError();
+ TheCall->setArg(i, Arg);
+ }
+ }
+
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
+ if (!Method->isStatic())
+ return ExprError(Diag(LParenLoc, diag::err_member_call_without_object)
+ << Fn->getSourceRange());
+
+ // Check for sentinels
+ if (NDecl)
+ DiagnoseSentinelCalls(NDecl, LParenLoc, Args, NumArgs);
+ // Do special checking on direct calls to functions.
+ if (FDecl)
+ return CheckFunctionCall(FDecl, TheCall.take());
+ if (NDecl)
+ return CheckBlockCall(NDecl, TheCall.take());
+
+ return Owned(TheCall.take());
+}
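+
+// A minimal C++ sketch (hypothetical names) of the ADL handling above:
+//   namespace N { struct S {}; void g(S); }
+//   void call(N::S s) {
+//     g(s);    // ok: argument-dependent lookup finds N::g
+//     (g)(s);  // error: parentheses around the callee disable ADL
+//   }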
+
+Action::OwningExprResult
+Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, TypeTy *Ty,
+ SourceLocation RParenLoc, ExprArg InitExpr) {
+ assert((Ty != 0) && "ActOnCompoundLiteral(): missing type");
+ QualType literalType = QualType::getFromOpaquePtr(Ty);
+ // FIXME: put back this assert when initializers are worked out.
+ //assert((InitExpr != 0) && "ActOnCompoundLiteral(): missing expression");
+ Expr *literalExpr = static_cast<Expr*>(InitExpr.get());
+
+ if (literalType->isArrayType()) {
+ if (literalType->isVariableArrayType())
+ return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
+ << SourceRange(LParenLoc, literalExpr->getSourceRange().getEnd()));
+ } else if (!literalType->isDependentType() &&
+ RequireCompleteType(LParenLoc, literalType,
+ diag::err_typecheck_decl_incomplete_type,
+ SourceRange(LParenLoc, literalExpr->getSourceRange().getEnd())))
+ return ExprError();
+
+ if (CheckInitializerTypes(literalExpr, literalType, LParenLoc,
+ DeclarationName(), /*FIXME:DirectInit=*/false))
+ return ExprError();
+
+ bool isFileScope = getCurFunctionOrMethodDecl() == 0;
+ if (isFileScope) { // 6.5.2.5p3
+ if (CheckForConstantInitializer(literalExpr, literalType))
+ return ExprError();
+ }
+ InitExpr.release();
+ return Owned(new (Context) CompoundLiteralExpr(LParenLoc, literalType,
+ literalExpr, isFileScope));
+}
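+
+// Illustrative C99 compound literals exercising the checks above
+// (hypothetical declarations):
+//   int *p = (int[]){1, 2, 3};  // ok at file scope: constant initializer
+//   void h(int n) {
+//     int *q = (int[n]){0};     // error: variable-sized object may not be
+//                               //        initialized
+//   }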
+
+Action::OwningExprResult
+Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg initlist,
+ SourceLocation RBraceLoc) {
+ unsigned NumInit = initlist.size();
+ Expr **InitList = reinterpret_cast<Expr**>(initlist.release());
+
+ // Semantic analysis for initializers is done by ActOnDeclarator() and
+  // CheckInitializer() - it requires knowledge of the object being initialized.
+
+ InitListExpr *E = new (Context) InitListExpr(LBraceLoc, InitList, NumInit,
+ RBraceLoc);
+ E->setType(Context.VoidTy); // FIXME: just a place holder for now.
+ return Owned(E);
+}
+
+/// CheckCastTypes - Check type constraints for casting between types.
+bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr) {
+ UsualUnaryConversions(castExpr);
+
+ // C99 6.5.4p2: the cast type needs to be void or scalar and the expression
+ // type needs to be scalar.
+ if (castType->isVoidType()) {
+ // Cast to void allows any expr type.
+ } else if (castType->isDependentType() || castExpr->isTypeDependent()) {
+ // We can't check any more until template instantiation time.
+ } else if (!castType->isScalarType() && !castType->isVectorType()) {
+ if (Context.getCanonicalType(castType).getUnqualifiedType() ==
+ Context.getCanonicalType(castExpr->getType().getUnqualifiedType()) &&
+ (castType->isStructureType() || castType->isUnionType())) {
+ // GCC struct/union extension: allow cast to self.
+ // FIXME: Check that the cast destination type is complete.
+ Diag(TyR.getBegin(), diag::ext_typecheck_cast_nonscalar)
+ << castType << castExpr->getSourceRange();
+ } else if (castType->isUnionType()) {
+ // GCC cast to union extension
+ RecordDecl *RD = castType->getAsRecordType()->getDecl();
+ RecordDecl::field_iterator Field, FieldEnd;
+ for (Field = RD->field_begin(Context), FieldEnd = RD->field_end(Context);
+ Field != FieldEnd; ++Field) {
+ if (Context.getCanonicalType(Field->getType()).getUnqualifiedType() ==
+ Context.getCanonicalType(castExpr->getType()).getUnqualifiedType()) {
+ Diag(TyR.getBegin(), diag::ext_typecheck_cast_to_union)
+ << castExpr->getSourceRange();
+ break;
+ }
+ }
+ if (Field == FieldEnd)
+ return Diag(TyR.getBegin(), diag::err_typecheck_cast_to_union_no_type)
+ << castExpr->getType() << castExpr->getSourceRange();
+ } else {
+ // Reject any other conversions to non-scalar types.
+ return Diag(TyR.getBegin(), diag::err_typecheck_cond_expect_scalar)
+ << castType << castExpr->getSourceRange();
+ }
+ } else if (!castExpr->getType()->isScalarType() &&
+ !castExpr->getType()->isVectorType()) {
+ return Diag(castExpr->getLocStart(),
+ diag::err_typecheck_expect_scalar_operand)
+ << castExpr->getType() << castExpr->getSourceRange();
+ } else if (castExpr->getType()->isVectorType()) {
+ if (CheckVectorCast(TyR, castExpr->getType(), castType))
+ return true;
+ } else if (castType->isVectorType()) {
+ if (CheckVectorCast(TyR, castType, castExpr->getType()))
+ return true;
+ } else if (getLangOptions().ObjC1 && isa<ObjCSuperExpr>(castExpr)) {
+ return Diag(castExpr->getLocStart(), diag::err_illegal_super_cast) << TyR;
+ } else if (!castType->isArithmeticType()) {
+ QualType castExprType = castExpr->getType();
+ if (!castExprType->isIntegralType() && castExprType->isArithmeticType())
+ return Diag(castExpr->getLocStart(),
+ diag::err_cast_pointer_from_non_pointer_int)
+ << castExprType << castExpr->getSourceRange();
+ } else if (!castExpr->getType()->isArithmeticType()) {
+ if (!castType->isIntegralType() && castType->isArithmeticType())
+ return Diag(castExpr->getLocStart(),
+ diag::err_cast_pointer_to_non_pointer_int)
+ << castType << castExpr->getSourceRange();
+ }
+ if (isa<ObjCSelectorExpr>(castExpr))
+ return Diag(castExpr->getLocStart(), diag::err_cast_selector_expr);
+ return false;
+}
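+
+// A small illustration of the cast checks above (hypothetical types):
+//   union U { int i; float f; };
+//   void demo(int x, char *p) {
+//     (union U)x;  // GCC extension: cast to union type; member 'i' matches
+//     (union U)p;  // error: no member of 'union U' has type 'char *'
+//   }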
+
+bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty) {
+ assert(VectorTy->isVectorType() && "Not a vector type!");
+
+ if (Ty->isVectorType() || Ty->isIntegerType()) {
+ if (Context.getTypeSize(VectorTy) != Context.getTypeSize(Ty))
+ return Diag(R.getBegin(),
+ Ty->isVectorType() ?
+ diag::err_invalid_conversion_between_vectors :
+ diag::err_invalid_conversion_between_vector_and_integer)
+ << VectorTy << Ty << R;
+ } else
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_scalar)
+ << VectorTy << Ty << R;
+
+ return false;
+}
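+
+// Illustrative vector casts checked above, using the GCC 'vector_size'
+// extension (hypothetical typedefs):
+//   typedef int   v4si __attribute__((vector_size(16)));
+//   typedef float v4sf __attribute__((vector_size(16)));
+//   void demo(v4sf f, short s, double d) {
+//     (v4si)f;  // ok: same total size, treated as a bitcast
+//     (v4si)s;  // error: vector and integer sizes differ
+//     (v4si)d;  // error: conversion between vector and scalar
+//   }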
+
+Action::OwningExprResult
+Sema::ActOnCastExpr(SourceLocation LParenLoc, TypeTy *Ty,
+ SourceLocation RParenLoc, ExprArg Op) {
+ assert((Ty != 0) && (Op.get() != 0) &&
+ "ActOnCastExpr(): missing type or expr");
+
+ Expr *castExpr = Op.takeAs<Expr>();
+ QualType castType = QualType::getFromOpaquePtr(Ty);
+
+ if (CheckCastTypes(SourceRange(LParenLoc, RParenLoc), castType, castExpr))
+ return ExprError();
+ return Owned(new (Context) CStyleCastExpr(castType, castExpr, castType,
+ LParenLoc, RParenLoc));
+}
+
+/// Note that lhs is not null here, even if this is the gnu "x ?: y" extension.
+/// In that case, lhs = cond.
+/// C99 6.5.15
+QualType Sema::CheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
+ SourceLocation QuestionLoc) {
+ // C++ is sufficiently different to merit its own checker.
+ if (getLangOptions().CPlusPlus)
+ return CXXCheckConditionalOperands(Cond, LHS, RHS, QuestionLoc);
+
+ UsualUnaryConversions(Cond);
+ UsualUnaryConversions(LHS);
+ UsualUnaryConversions(RHS);
+ QualType CondTy = Cond->getType();
+ QualType LHSTy = LHS->getType();
+ QualType RHSTy = RHS->getType();
+
+ // first, check the condition.
+ if (!CondTy->isScalarType()) { // C99 6.5.15p2
+ Diag(Cond->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return QualType();
+ }
+
+ // Now check the two expressions.
+
+ // If both operands have arithmetic type, do the usual arithmetic conversions
+ // to find a common type: C99 6.5.15p3,5.
+ if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
+ UsualArithmeticConversions(LHS, RHS);
+ return LHS->getType();
+ }
+
+ // If both operands are the same structure or union type, the result is that
+ // type.
+ if (const RecordType *LHSRT = LHSTy->getAsRecordType()) { // C99 6.5.15p3
+ if (const RecordType *RHSRT = RHSTy->getAsRecordType())
+ if (LHSRT->getDecl() == RHSRT->getDecl())
+ // "If both the operands have structure or union type, the result has
+ // that type." This implies that CV qualifiers are dropped.
+ return LHSTy.getUnqualifiedType();
+ // FIXME: Type of conditional expression must be complete in C mode.
+ }
+
+ // C99 6.5.15p5: "If both operands have void type, the result has void type."
+ // The following || allows only one side to be void (a GCC-ism).
+ if (LHSTy->isVoidType() || RHSTy->isVoidType()) {
+ if (!LHSTy->isVoidType())
+ Diag(RHS->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << RHS->getSourceRange();
+ if (!RHSTy->isVoidType())
+ Diag(LHS->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << LHS->getSourceRange();
+ ImpCastExprToType(LHS, Context.VoidTy);
+ ImpCastExprToType(RHS, Context.VoidTy);
+ return Context.VoidTy;
+ }
+ // C99 6.5.15p6 - "if one operand is a null pointer constant, the result has
+ // the type of the other operand."
+ if ((LHSTy->isPointerType() || LHSTy->isBlockPointerType() ||
+ Context.isObjCObjectPointerType(LHSTy)) &&
+ RHS->isNullPointerConstant(Context)) {
+ ImpCastExprToType(RHS, LHSTy); // promote the null to a pointer.
+ return LHSTy;
+ }
+ if ((RHSTy->isPointerType() || RHSTy->isBlockPointerType() ||
+ Context.isObjCObjectPointerType(RHSTy)) &&
+ LHS->isNullPointerConstant(Context)) {
+ ImpCastExprToType(LHS, RHSTy); // promote the null to a pointer.
+ return RHSTy;
+ }
+
+ const PointerType *LHSPT = LHSTy->getAsPointerType();
+ const PointerType *RHSPT = RHSTy->getAsPointerType();
+ const BlockPointerType *LHSBPT = LHSTy->getAsBlockPointerType();
+ const BlockPointerType *RHSBPT = RHSTy->getAsBlockPointerType();
+
+ // Handle the case where both operands are pointers before we handle null
+ // pointer constants in case both operands are null pointer constants.
+ if ((LHSPT || LHSBPT) && (RHSPT || RHSBPT)) { // C99 6.5.15p3,6
+ // get the "pointed to" types
+ QualType lhptee = (LHSPT ? LHSPT->getPointeeType()
+ : LHSBPT->getPointeeType());
+ QualType rhptee = (RHSPT ? RHSPT->getPointeeType()
+ : RHSBPT->getPointeeType());
+
+ // ignore qualifiers on void (C99 6.5.15p3, clause 6)
+ if (lhptee->isVoidType()
+ && (RHSBPT || rhptee->isIncompleteOrObjectType())) {
+ // Figure out necessary qualifiers (C99 6.5.15p6)
+ QualType destPointee=lhptee.getQualifiedType(rhptee.getCVRQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ ImpCastExprToType(LHS, destType); // add qualifiers if necessary
+ ImpCastExprToType(RHS, destType); // promote to void*
+ return destType;
+ }
+ if (rhptee->isVoidType()
+ && (LHSBPT || lhptee->isIncompleteOrObjectType())) {
+ QualType destPointee=rhptee.getQualifiedType(lhptee.getCVRQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ ImpCastExprToType(LHS, destType); // add qualifiers if necessary
+ ImpCastExprToType(RHS, destType); // promote to void*
+ return destType;
+ }
+
+ bool sameKind = (LHSPT && RHSPT) || (LHSBPT && RHSBPT);
+ if (sameKind
+ && Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) {
+ // Two identical pointer types are always compatible.
+ return LHSTy;
+ }
+
+ QualType compositeType = LHSTy;
+
+ // If either type is an Objective-C object type then check
+ // compatibility according to Objective-C.
+ if (Context.isObjCObjectPointerType(LHSTy) ||
+ Context.isObjCObjectPointerType(RHSTy)) {
+ // If both operands are interfaces and either operand can be
+ // assigned to the other, use that type as the composite
+ // type. This allows
+ // xxx ? (A*) a : (B*) b
+ // where B is a subclass of A.
+ //
+ // Additionally, as for assignment, if either type is 'id'
+ // allow silent coercion. Finally, if the types are
+ // incompatible then make sure to use 'id' as the composite
+ // type so the result is acceptable for sending messages to.
+
+ // FIXME: Consider unifying with 'areComparableObjCPointerTypes'.
+ // It could return the composite type.
+ const ObjCInterfaceType* LHSIface = lhptee->getAsObjCInterfaceType();
+ const ObjCInterfaceType* RHSIface = rhptee->getAsObjCInterfaceType();
+ if (LHSIface && RHSIface &&
+ Context.canAssignObjCInterfaces(LHSIface, RHSIface)) {
+ compositeType = LHSTy;
+ } else if (LHSIface && RHSIface &&
+ Context.canAssignObjCInterfaces(RHSIface, LHSIface)) {
+ compositeType = RHSTy;
+ } else if (Context.isObjCIdStructType(lhptee) ||
+ Context.isObjCIdStructType(rhptee)) {
+ compositeType = Context.getObjCIdType();
+ } else if (LHSBPT || RHSBPT) {
+ if (!sameKind
+ || !Context.typesAreBlockCompatible(lhptee.getUnqualifiedType(),
+ rhptee.getUnqualifiedType()))
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
+ return QualType();
+ } else {
+ Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ QualType incompatTy = Context.getObjCIdType();
+ ImpCastExprToType(LHS, incompatTy);
+ ImpCastExprToType(RHS, incompatTy);
+ return incompatTy;
+ }
+ } else if (!sameKind
+ || !Context.typesAreCompatible(lhptee.getUnqualifiedType(),
+ rhptee.getUnqualifiedType())) {
+ Diag(QuestionLoc, diag::warn_typecheck_cond_incompatible_pointers)
+ << LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
+ // In this situation, we assume void* type. No especially good
+ // reason, but this is what gcc does, and we do have to pick
+      // something to get a consistent AST.
+ QualType incompatTy = Context.getPointerType(Context.VoidTy);
+ ImpCastExprToType(LHS, incompatTy);
+ ImpCastExprToType(RHS, incompatTy);
+ return incompatTy;
+ }
+ // The pointer types are compatible.
+ // C99 6.5.15p6: If both operands are pointers to compatible types *or* to
+ // differently qualified versions of compatible types, the result type is
+ // a pointer to an appropriately qualified version of the *composite*
+ // type.
+ // FIXME: Need to calculate the composite type.
+ // FIXME: Need to add qualifiers
+ ImpCastExprToType(LHS, compositeType);
+ ImpCastExprToType(RHS, compositeType);
+ return compositeType;
+ }
+
+ // GCC compatibility: soften pointer/integer mismatch.
+ if (RHSTy->isPointerType() && LHSTy->isIntegerType()) {
+ Diag(QuestionLoc, diag::warn_typecheck_cond_pointer_integer_mismatch)
+ << LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
+ ImpCastExprToType(LHS, RHSTy); // promote the integer to a pointer.
+ return RHSTy;
+ }
+ if (LHSTy->isPointerType() && RHSTy->isIntegerType()) {
+ Diag(QuestionLoc, diag::warn_typecheck_cond_pointer_integer_mismatch)
+ << LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
+ ImpCastExprToType(RHS, LHSTy); // promote the integer to a pointer.
+ return LHSTy;
+ }
+
+ // Need to handle "id<xx>" explicitly. Unlike "id", whose canonical type
+ // evaluates to "struct objc_object *" (and is handled above when comparing
+ // id with statically typed objects).
+ if (LHSTy->isObjCQualifiedIdType() || RHSTy->isObjCQualifiedIdType()) {
+ // GCC allows qualified id and any Objective-C type to devolve to
+    // id. The check is localized here until it is clear whether it should be
+    // part of ObjCQualifiedIdTypesAreCompatible.
+ if (ObjCQualifiedIdTypesAreCompatible(LHSTy, RHSTy, true) ||
+ (LHSTy->isObjCQualifiedIdType() &&
+ Context.isObjCObjectPointerType(RHSTy)) ||
+ (RHSTy->isObjCQualifiedIdType() &&
+ Context.isObjCObjectPointerType(LHSTy))) {
+ // FIXME: This is not the correct composite type. This only happens to
+ // work because id can more or less be used anywhere, however this may
+ // change the type of method sends.
+
+ // FIXME: gcc adds some type-checking of the arguments and emits
+ // (confusing) incompatible comparison warnings in some
+ // cases. Investigate.
+ QualType compositeType = Context.getObjCIdType();
+ ImpCastExprToType(LHS, compositeType);
+ ImpCastExprToType(RHS, compositeType);
+ return compositeType;
+ }
+ }
+
+ // Otherwise, the operands are not compatible.
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS->getSourceRange() << RHS->getSourceRange();
+ return QualType();
+}
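+
+// A few C conditional expressions exercising the rules above (hypothetical
+// declarations 'int c; int *ip; void *vp; const int *cip; double d;'):
+//   c ? ip : 0;    // ok: the null pointer constant takes the pointer type
+//   c ? ip : vp;   // ok: the result is an appropriately qualified 'void *'
+//   c ? ip : cip;  // ok: compatible pointees; qualifiers should be merged
+//   c ? ip : d;    // error: incompatible operands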
+
+/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+/// in the case of the GNU conditional expr extension.
+Action::OwningExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ ExprArg Cond, ExprArg LHS,
+ ExprArg RHS) {
+ Expr *CondExpr = (Expr *) Cond.get();
+ Expr *LHSExpr = (Expr *) LHS.get(), *RHSExpr = (Expr *) RHS.get();
+
+ // If this is the gnu "x ?: y" extension, analyze the types as though the LHS
+ // was the condition.
+ bool isLHSNull = LHSExpr == 0;
+ if (isLHSNull)
+ LHSExpr = CondExpr;
+
+ QualType result = CheckConditionalOperands(CondExpr, LHSExpr,
+ RHSExpr, QuestionLoc);
+ if (result.isNull())
+ return ExprError();
+
+ Cond.release();
+ LHS.release();
+ RHS.release();
+ return Owned(new (Context) ConditionalOperator(CondExpr,
+ isLHSNull ? 0 : LHSExpr,
+ RHSExpr, result));
+}
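+
+// The GNU "x ?: y" extension handled above reuses the condition as the
+// second operand; a minimal sketch with hypothetical helpers returning int*:
+//   int *p = get();
+//   int *q = p ?: fallback();  // same as 'p ? p : fallback()', except that
+//                              // 'p' is evaluated only once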
+
+
+// CheckPointerTypesForAssignment - This is a very tricky routine (despite
+// being closely modeled after the C99 spec:-). The odd characteristic of this
+// routine is that it effectively ignores the qualifiers on the top-level pointee.
+// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
+// FIXME: add a couple examples in this comment.
+Sema::AssignConvertType
+Sema::CheckPointerTypesForAssignment(QualType lhsType, QualType rhsType) {
+ QualType lhptee, rhptee;
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ lhptee = lhsType->getAsPointerType()->getPointeeType();
+ rhptee = rhsType->getAsPointerType()->getPointeeType();
+
+ // make sure we operate on the canonical type
+ lhptee = Context.getCanonicalType(lhptee);
+ rhptee = Context.getCanonicalType(rhptee);
+
+ AssignConvertType ConvTy = Compatible;
+
+  // C99 6.5.16.1p1: The following citation is common to constraints
+ // 3 & 4 (below). ...and the type *pointed to* by the left has all the
+ // qualifiers of the type *pointed to* by the right;
+ // FIXME: Handle ExtQualType
+ if (!lhptee.isAtLeastAsQualifiedAs(rhptee))
+ ConvTy = CompatiblePointerDiscardsQualifiers;
+
+ // C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or
+ // incomplete type and the other is a pointer to a qualified or unqualified
+ // version of void...
+ if (lhptee->isVoidType()) {
+ if (rhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+    // As an extension, we allow conversion between void* and function pointers.
+ assert(rhptee->isFunctionType());
+ return FunctionVoidPointer;
+ }
+
+ if (rhptee->isVoidType()) {
+ if (lhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+    // As an extension, we allow conversion between void* and function pointers.
+ assert(lhptee->isFunctionType());
+ return FunctionVoidPointer;
+ }
+ // C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
+ // unqualified versions of compatible types, ...
+ lhptee = lhptee.getUnqualifiedType();
+ rhptee = rhptee.getUnqualifiedType();
+ if (!Context.typesAreCompatible(lhptee, rhptee)) {
+ // Check if the pointee types are compatible ignoring the sign.
+ // We explicitly check for char so that we catch "char" vs
+ // "unsigned char" on systems where "char" is unsigned.
+ if (lhptee->isCharType()) {
+ lhptee = Context.UnsignedCharTy;
+ } else if (lhptee->isSignedIntegerType()) {
+ lhptee = Context.getCorrespondingUnsignedType(lhptee);
+ }
+ if (rhptee->isCharType()) {
+ rhptee = Context.UnsignedCharTy;
+ } else if (rhptee->isSignedIntegerType()) {
+ rhptee = Context.getCorrespondingUnsignedType(rhptee);
+ }
+ if (lhptee == rhptee) {
+ // Types are compatible ignoring the sign. Qualifier incompatibility
+ // takes priority over sign incompatibility because the sign
+ // warning can be disabled.
+ if (ConvTy != Compatible)
+ return ConvTy;
+ return IncompatiblePointerSign;
+ }
+ // General pointer incompatibility takes priority over qualifiers.
+ return IncompatiblePointer;
+ }
+ return ConvTy;
+}
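+
+// Examples of the assignment classifications computed above (hypothetical
+// variables):
+//   char *cp; const char *ccp; unsigned char *ucp; int *ip;
+//   void *vp; void (*fp)(void);
+//   ccp = cp;  // Compatible: the LHS pointee carries all the RHS qualifiers
+//   cp = ccp;  // CompatiblePointerDiscardsQualifiers
+//   cp = ucp;  // IncompatiblePointerSign ('char' vs. 'unsigned char')
+//   cp = ip;   // IncompatiblePointer
+//   fp = vp;   // FunctionVoidPointer (accepted as an extension)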
+
+/// CheckBlockPointerTypesForAssignment - This routine determines whether two
+/// block pointer types are compatible or whether a block and normal pointer
+/// are compatible. It is more restrictive than comparing two function pointer
+/// types.
+Sema::AssignConvertType
+Sema::CheckBlockPointerTypesForAssignment(QualType lhsType,
+ QualType rhsType) {
+ QualType lhptee, rhptee;
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ lhptee = lhsType->getAsBlockPointerType()->getPointeeType();
+ rhptee = rhsType->getAsBlockPointerType()->getPointeeType();
+
+ // make sure we operate on the canonical type
+ lhptee = Context.getCanonicalType(lhptee);
+ rhptee = Context.getCanonicalType(rhptee);
+
+ AssignConvertType ConvTy = Compatible;
+
+ // For blocks we enforce that qualifiers are identical.
+ if (lhptee.getCVRQualifiers() != rhptee.getCVRQualifiers())
+ ConvTy = CompatiblePointerDiscardsQualifiers;
+
+ if (!Context.typesAreBlockCompatible(lhptee, rhptee))
+ return IncompatibleBlockPointer;
+ return ConvTy;
+}
+
+/// CheckAssignmentConstraints (C99 6.5.16) - This routine currently
+/// has code to accommodate several GCC extensions when type checking
+/// pointers. Here are some objectionable examples that GCC considers warnings:
+///
+/// int a, *pint;
+/// short *pshort;
+/// struct foo *pfoo;
+///
+/// pint = pshort; // warning: assignment from incompatible pointer type
+/// a = pint; // warning: assignment makes integer from pointer without a cast
+/// pint = a; // warning: assignment makes pointer from integer without a cast
+/// pint = pfoo; // warning: assignment from incompatible pointer type
+///
+/// As a result, the code for dealing with pointers is more complex than the
+/// C99 spec dictates.
+///
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(QualType lhsType, QualType rhsType) {
+ // Get canonical types. We're not formatting these types, just comparing
+ // them.
+ lhsType = Context.getCanonicalType(lhsType).getUnqualifiedType();
+ rhsType = Context.getCanonicalType(rhsType).getUnqualifiedType();
+
+ if (lhsType == rhsType)
+ return Compatible; // Common case: fast path an exact match.
+
+ // If the left-hand side is a reference type, then we are in a
+ // (rare!) case where we've allowed the use of references in C,
+ // e.g., as a parameter type in a built-in function. In this case,
+ // just make sure that the type referenced is compatible with the
+ // right-hand side type. The caller is responsible for adjusting
+ // lhsType so that the resulting expression does not have reference
+ // type.
+ if (const ReferenceType *lhsTypeRef = lhsType->getAsReferenceType()) {
+ if (Context.typesAreCompatible(lhsTypeRef->getPointeeType(), rhsType))
+ return Compatible;
+ return Incompatible;
+ }
+
+ if (lhsType->isObjCQualifiedIdType() || rhsType->isObjCQualifiedIdType()) {
+ if (ObjCQualifiedIdTypesAreCompatible(lhsType, rhsType, false))
+ return Compatible;
+ // Relax integer conversions like we do for pointers below.
+ if (rhsType->isIntegerType())
+ return IntToPointer;
+ if (lhsType->isIntegerType())
+ return PointerToInt;
+ return IncompatibleObjCQualifiedId;
+ }
+
+ if (lhsType->isVectorType() || rhsType->isVectorType()) {
+ // For ExtVector, allow vector splats; float -> <n x float>
+ if (const ExtVectorType *LV = lhsType->getAsExtVectorType())
+ if (LV->getElementType() == rhsType)
+ return Compatible;
+
+ // If we are allowing lax vector conversions, and LHS and RHS are both
+ // vectors, the total size only needs to be the same. This is a bitcast;
+ // no bits are changed but the result type is different.
+ if (getLangOptions().LaxVectorConversions &&
+ lhsType->isVectorType() && rhsType->isVectorType()) {
+ if (Context.getTypeSize(lhsType) == Context.getTypeSize(rhsType))
+ return IncompatibleVectors;
+ }
+ return Incompatible;
+ }
+
+ if (lhsType->isArithmeticType() && rhsType->isArithmeticType())
+ return Compatible;
+
+ if (isa<PointerType>(lhsType)) {
+ if (rhsType->isIntegerType())
+ return IntToPointer;
+
+ if (isa<PointerType>(rhsType))
+ return CheckPointerTypesForAssignment(lhsType, rhsType);
+
+ if (rhsType->getAsBlockPointerType()) {
+ if (lhsType->getAsPointerType()->getPointeeType()->isVoidType())
+ return Compatible;
+
+ // Treat block pointers as objects.
+ if (getLangOptions().ObjC1 &&
+ lhsType == Context.getCanonicalType(Context.getObjCIdType()))
+ return Compatible;
+ }
+ return Incompatible;
+ }
+
+ if (isa<BlockPointerType>(lhsType)) {
+ if (rhsType->isIntegerType())
+ return IntToBlockPointer;
+
+ // Treat block pointers as objects.
+ if (getLangOptions().ObjC1 &&
+ rhsType == Context.getCanonicalType(Context.getObjCIdType()))
+ return Compatible;
+
+ if (rhsType->isBlockPointerType())
+ return CheckBlockPointerTypesForAssignment(lhsType, rhsType);
+
+ if (const PointerType *RHSPT = rhsType->getAsPointerType()) {
+ if (RHSPT->getPointeeType()->isVoidType())
+ return Compatible;
+ }
+ return Incompatible;
+ }
+
+ if (isa<PointerType>(rhsType)) {
+ // C99 6.5.16.1p1: the left operand is _Bool and the right is a pointer.
+ if (lhsType == Context.BoolTy)
+ return Compatible;
+
+ if (lhsType->isIntegerType())
+ return PointerToInt;
+
+ if (isa<PointerType>(lhsType))
+ return CheckPointerTypesForAssignment(lhsType, rhsType);
+
+ if (isa<BlockPointerType>(lhsType) &&
+ rhsType->getAsPointerType()->getPointeeType()->isVoidType())
+ return Compatible;
+ return Incompatible;
+ }
+
+ if (isa<TagType>(lhsType) && isa<TagType>(rhsType)) {
+ if (Context.typesAreCompatible(lhsType, rhsType))
+ return Compatible;
+ }
+ return Incompatible;
+}
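+
+// Two further accepted forms worth noting (illustrative; the first uses the
+// Clang ext_vector_type extension):
+//   typedef float float4 __attribute__((ext_vector_type(4)));
+//   float4 v; float s; _Bool b; int *p;
+//   v = s;  // ok: scalar splat onto an extended vector
+//   b = p;  // ok: C99 6.5.16.1p1, a pointer may be assigned to _Bool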
+
+/// \brief Constructs a transparent union from an expression that is
+/// used to initialize the transparent union.
+static void ConstructTransparentUnion(ASTContext &C, Expr *&E,
+ QualType UnionType, FieldDecl *Field) {
+ // Build an initializer list that designates the appropriate member
+ // of the transparent union.
+ InitListExpr *Initializer = new (C) InitListExpr(SourceLocation(),
+ &E, 1,
+ SourceLocation());
+ Initializer->setType(UnionType);
+ Initializer->setInitializedFieldInUnion(Field);
+
+ // Build a compound literal constructing a value of the transparent
+ // union type from this initializer list.
+ E = new (C) CompoundLiteralExpr(SourceLocation(), UnionType, Initializer,
+ false);
+}
+
+Sema::AssignConvertType
+Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType, Expr *&rExpr) {
+ QualType FromType = rExpr->getType();
+
+ // If the ArgType is a Union type, we want to handle a potential
+ // transparent_union GCC extension.
+ const RecordType *UT = ArgType->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ return Incompatible;
+
+ // The field to initialize within the transparent union.
+ RecordDecl *UD = UT->getDecl();
+ FieldDecl *InitField = 0;
+ // It's compatible if the expression matches any of the fields.
+ for (RecordDecl::field_iterator it = UD->field_begin(Context),
+ itend = UD->field_end(Context);
+ it != itend; ++it) {
+ if (it->getType()->isPointerType()) {
+ // If the transparent union contains a pointer type, we allow:
+ // 1) void pointer
+ // 2) null pointer constant
+ if (FromType->isPointerType())
+ if (FromType->getAsPointerType()->getPointeeType()->isVoidType()) {
+ ImpCastExprToType(rExpr, it->getType());
+ InitField = *it;
+ break;
+ }
+
+ if (rExpr->isNullPointerConstant(Context)) {
+ ImpCastExprToType(rExpr, it->getType());
+ InitField = *it;
+ break;
+ }
+ }
+
+ if (CheckAssignmentConstraints(it->getType(), rExpr->getType())
+ == Compatible) {
+ InitField = *it;
+ break;
+ }
+ }
+
+ if (!InitField)
+ return Incompatible;
+
+ ConstructTransparentUnion(Context, rExpr, ArgType, InitField);
+ return Compatible;
+}
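+
+// A sketch of the GCC transparent_union extension handled above
+// (hypothetical union and function):
+//   typedef union {
+//     int *ip;
+//     float *fp;
+//   } IntOrFloatPtr __attribute__((transparent_union));
+//   void take(IntOrFloatPtr);
+//   int i; float f;
+//   take(&i);  // ok: initializes the 'ip' member
+//   take(&f);  // ok: initializes the 'fp' member
+//   take(0);   // ok: a null pointer constant matches a pointer member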
+
+Sema::AssignConvertType
+Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) {
+ if (getLangOptions().CPlusPlus) {
+ if (!lhsType->isRecordType()) {
+ // C++ 5.17p3: If the left operand is not of class type, the
+ // expression is implicitly converted (C++ 4) to the
+ // cv-unqualified type of the left operand.
+ if (PerformImplicitConversion(rExpr, lhsType.getUnqualifiedType(),
+ "assigning"))
+ return Incompatible;
+ return Compatible;
+ }
+
+ // FIXME: Currently, we fall through and treat C++ classes like C
+ // structures.
+ }
+
+ // C99 6.5.16.1p1: the left operand is a pointer and the right is
+ // a null pointer constant.
+ if ((lhsType->isPointerType() ||
+ lhsType->isObjCQualifiedIdType() ||
+ lhsType->isBlockPointerType())
+ && rExpr->isNullPointerConstant(Context)) {
+ ImpCastExprToType(rExpr, lhsType);
+ return Compatible;
+ }
+
+  // This check seems unnatural, but it is necessary to ensure the proper
+  // conversion of functions/arrays. If the conversion were done for all
+  // DeclRefExprs (created by ActOnIdentifierExpr), it would mess up the unary
+  // expressions that suppress this implicit conversion (&, sizeof).
+ //
+ // Suppress this for references: C++ 8.5.3p5.
+ if (!lhsType->isReferenceType())
+ DefaultFunctionArrayConversion(rExpr);
+
+ Sema::AssignConvertType result =
+ CheckAssignmentConstraints(lhsType, rExpr->getType());
+
+ // C99 6.5.16.1p2: The value of the right operand is converted to the
+ // type of the assignment expression.
+ // CheckAssignmentConstraints allows the left-hand side to be a reference,
+ // so that we can use references in built-in functions even in C.
+ // The getNonReferenceType() call makes sure that the resulting expression
+ // does not have reference type.
+ if (result != Incompatible && rExpr->getType() != lhsType)
+ ImpCastExprToType(rExpr, lhsType.getNonReferenceType());
+ return result;
+}
+
+QualType Sema::InvalidOperands(SourceLocation Loc, Expr *&lex, Expr *&rex) {
+ Diag(Loc, diag::err_typecheck_invalid_operands)
+ << lex->getType() << rex->getType()
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+}
+
+inline QualType Sema::CheckVectorOperands(SourceLocation Loc, Expr *&lex,
+ Expr *&rex) {
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType lhsType =
+ Context.getCanonicalType(lex->getType()).getUnqualifiedType();
+ QualType rhsType =
+ Context.getCanonicalType(rex->getType()).getUnqualifiedType();
+
+ // If the vector types are identical, return.
+ if (lhsType == rhsType)
+ return lhsType;
+
+ // Handle the case of a vector & extvector type of the same size and element
+ // type. It would be nice if we only had one vector type someday.
+ if (getLangOptions().LaxVectorConversions) {
+ // FIXME: Should we warn here?
+ if (const VectorType *LV = lhsType->getAsVectorType()) {
+ if (const VectorType *RV = rhsType->getAsVectorType())
+ if (LV->getElementType() == RV->getElementType() &&
+ LV->getNumElements() == RV->getNumElements()) {
+ return lhsType->isExtVectorType() ? lhsType : rhsType;
+ }
+ }
+ }
+
+ // If the lhs is an extended vector and the rhs is a scalar of the same type
+ // or a literal, promote the rhs to the vector type.
+ if (const ExtVectorType *V = lhsType->getAsExtVectorType()) {
+ QualType eltType = V->getElementType();
+
+ if ((eltType->getAsBuiltinType() == rhsType->getAsBuiltinType()) ||
+ (eltType->isIntegerType() && isa<IntegerLiteral>(rex)) ||
+ (eltType->isFloatingType() && isa<FloatingLiteral>(rex))) {
+ ImpCastExprToType(rex, lhsType);
+ return lhsType;
+ }
+ }
+
+ // If the rhs is an extended vector and the lhs is a scalar of the same type,
+ // promote the lhs to the vector type.
+ if (const ExtVectorType *V = rhsType->getAsExtVectorType()) {
+ QualType eltType = V->getElementType();
+
+ if ((eltType->getAsBuiltinType() == lhsType->getAsBuiltinType()) ||
+ (eltType->isIntegerType() && isa<IntegerLiteral>(lex)) ||
+ (eltType->isFloatingType() && isa<FloatingLiteral>(lex))) {
+ ImpCastExprToType(lex, rhsType);
+ return rhsType;
+ }
+ }
+
+ // You cannot convert between vector values of different size.
+ Diag(Loc, diag::err_typecheck_vector_not_convertable)
+ << lex->getType() << rex->getType()
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+}
+
+inline QualType Sema::CheckMultiplyDivideOperands(
+ Expr *&lex, Expr *&rex, SourceLocation Loc, bool isCompAssign)
+{
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType())
+ return CheckVectorOperands(Loc, lex, rex);
+
+ QualType compType = UsualArithmeticConversions(lex, rex, isCompAssign);
+
+ if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType())
+ return compType;
+ return InvalidOperands(Loc, lex, rex);
+}
+
+inline QualType Sema::CheckRemainderOperands(
+ Expr *&lex, Expr *&rex, SourceLocation Loc, bool isCompAssign)
+{
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType()) {
+ if (lex->getType()->isIntegerType() && rex->getType()->isIntegerType())
+ return CheckVectorOperands(Loc, lex, rex);
+ return InvalidOperands(Loc, lex, rex);
+ }
+
+ QualType compType = UsualArithmeticConversions(lex, rex, isCompAssign);
+
+ if (lex->getType()->isIntegerType() && rex->getType()->isIntegerType())
+ return compType;
+ return InvalidOperands(Loc, lex, rex);
+}
+
+inline QualType Sema::CheckAdditionOperands( // C99 6.5.6
+ Expr *&lex, Expr *&rex, SourceLocation Loc, QualType* CompLHSTy)
+{
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(Loc, lex, rex);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(lex, rex, CompLHSTy);
+
+ // handle the common case first (both operands are arithmetic).
+ if (lex->getType()->isArithmeticType() &&
+ rex->getType()->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ // Put any potential pointer into PExp
+ Expr* PExp = lex, *IExp = rex;
+ if (IExp->getType()->isPointerType())
+ std::swap(PExp, IExp);
+
+ if (const PointerType *PTy = PExp->getType()->getAsPointerType()) {
+ if (IExp->getType()->isIntegerType()) {
+ QualType PointeeTy = PTy->getPointeeType();
+ // Check for arithmetic on pointers to incomplete types.
+ if (PointeeTy->isVoidType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+
+ // GNU extension: arithmetic on pointer to void
+ Diag(Loc, diag::ext_gnu_void_ptr)
+ << lex->getSourceRange() << rex->getSourceRange();
+ } else if (PointeeTy->isFunctionType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
+ << lex->getType() << lex->getSourceRange();
+ return QualType();
+ }
+
+ // GNU extension: arithmetic on pointer to function
+ Diag(Loc, diag::ext_gnu_ptr_func_arith)
+ << lex->getType() << lex->getSourceRange();
+ } else if (!PTy->isDependentType() &&
+ RequireCompleteType(Loc, PointeeTy,
+ diag::err_typecheck_arithmetic_incomplete_type,
+ PExp->getSourceRange(), SourceRange(),
+ PExp->getType()))
+ return QualType();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (PointeeTy->isObjCInterfaceType() && LangOpts.ObjCNonFragileABI) {
+ Diag(Loc, diag::err_arithmetic_nonfragile_interface)
+ << PointeeTy << PExp->getSourceRange();
+ return QualType();
+ }
+
+ if (CompLHSTy) {
+ QualType LHSTy = lex->getType();
+ if (LHSTy->isPromotableIntegerType())
+ LHSTy = Context.IntTy;
+ else {
+ QualType T = isPromotableBitField(lex, Context);
+ if (!T.isNull())
+ LHSTy = T;
+ }
+
+ *CompLHSTy = LHSTy;
+ }
+ return PExp->getType();
+ }
+ }
+
+ return InvalidOperands(Loc, lex, rex);
+}
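+
+// Pointer addition cases diagnosed above (hypothetical declarations
+// 'int *ip; void *vp; void (*fp)(void); struct Incomplete *sp;'):
+//   ip + 4;  // ok: the result has the pointer type
+//   vp + 1;  // GNU extension in C (ext_gnu_void_ptr); an error in C++
+//   fp + 1;  // GNU extension in C (ext_gnu_ptr_func_arith); an error in C++
+//   sp + 1;  // error: arithmetic on a pointer to an incomplete type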
+
+// C99 6.5.6
+QualType Sema::CheckSubtractionOperands(Expr *&lex, Expr *&rex,
+ SourceLocation Loc, QualType* CompLHSTy) {
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(Loc, lex, rex);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(lex, rex, CompLHSTy);
+
+ // Enforce type constraints: C99 6.5.6p3.
+
+ // Handle the common case first (both operands are arithmetic).
+ if (lex->getType()->isArithmeticType()
+ && rex->getType()->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ // Either ptr - int or ptr - ptr.
+ if (const PointerType *LHSPTy = lex->getType()->getAsPointerType()) {
+ QualType lpointee = LHSPTy->getPointeeType();
+
+    // The LHS must be a completely-defined object type.
+
+ bool ComplainAboutVoid = false;
+ Expr *ComplainAboutFunc = 0;
+ if (lpointee->isVoidType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+
+ // GNU C extension: arithmetic on pointer to void
+ ComplainAboutVoid = true;
+ } else if (lpointee->isFunctionType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
+ << lex->getType() << lex->getSourceRange();
+ return QualType();
+ }
+
+ // GNU C extension: arithmetic on pointer to function
+ ComplainAboutFunc = lex;
+ } else if (!lpointee->isDependentType() &&
+ RequireCompleteType(Loc, lpointee,
+ diag::err_typecheck_sub_ptr_object,
+ lex->getSourceRange(),
+ SourceRange(),
+ lex->getType()))
+ return QualType();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (lpointee->isObjCInterfaceType() && LangOpts.ObjCNonFragileABI) {
+ Diag(Loc, diag::err_arithmetic_nonfragile_interface)
+ << lpointee << lex->getSourceRange();
+ return QualType();
+ }
+
+ // The result type of a pointer-int computation is the pointer type.
+ if (rex->getType()->isIntegerType()) {
+ if (ComplainAboutVoid)
+ Diag(Loc, diag::ext_gnu_void_ptr)
+ << lex->getSourceRange() << rex->getSourceRange();
+ if (ComplainAboutFunc)
+ Diag(Loc, diag::ext_gnu_ptr_func_arith)
+ << ComplainAboutFunc->getType()
+ << ComplainAboutFunc->getSourceRange();
+
+ if (CompLHSTy) *CompLHSTy = lex->getType();
+ return lex->getType();
+ }
+
+ // Handle pointer-pointer subtractions.
+ if (const PointerType *RHSPTy = rex->getType()->getAsPointerType()) {
+ QualType rpointee = RHSPTy->getPointeeType();
+
+      // The RHS must be a completely-defined object type.
+ // Handle the GNU void* extension.
+ if (rpointee->isVoidType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_void_type)
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+
+ ComplainAboutVoid = true;
+ } else if (rpointee->isFunctionType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(Loc, diag::err_typecheck_pointer_arith_function_type)
+ << rex->getType() << rex->getSourceRange();
+ return QualType();
+ }
+
+ // GNU extension: arithmetic on pointer to function
+ if (!ComplainAboutFunc)
+ ComplainAboutFunc = rex;
+ } else if (!rpointee->isDependentType() &&
+ RequireCompleteType(Loc, rpointee,
+ diag::err_typecheck_sub_ptr_object,
+ rex->getSourceRange(),
+ SourceRange(),
+ rex->getType()))
+ return QualType();
+
+ if (getLangOptions().CPlusPlus) {
+ // Pointee types must be the same: C++ [expr.add]
+ if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) {
+ Diag(Loc, diag::err_typecheck_sub_ptr_compatible)
+ << lex->getType() << rex->getType()
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+ } else {
+ // Pointee types must be compatible C99 6.5.6p3
+ if (!Context.typesAreCompatible(
+ Context.getCanonicalType(lpointee).getUnqualifiedType(),
+ Context.getCanonicalType(rpointee).getUnqualifiedType())) {
+ Diag(Loc, diag::err_typecheck_sub_ptr_compatible)
+ << lex->getType() << rex->getType()
+ << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+ }
+
+ if (ComplainAboutVoid)
+ Diag(Loc, diag::ext_gnu_void_ptr)
+ << lex->getSourceRange() << rex->getSourceRange();
+ if (ComplainAboutFunc)
+ Diag(Loc, diag::ext_gnu_ptr_func_arith)
+ << ComplainAboutFunc->getType()
+ << ComplainAboutFunc->getSourceRange();
+
+ if (CompLHSTy) *CompLHSTy = lex->getType();
+ return Context.getPointerDiffType();
+ }
+ }
+
+ return InvalidOperands(Loc, lex, rex);
+}
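+
+// Pointer subtraction cases handled above (hypothetical declarations
+// 'int *p, *q; double *d;'):
+//   p - 2;  // ok: pointer minus integer keeps the pointer type
+//   p - q;  // ok: the result has type ptrdiff_t (getPointerDiffType())
+//   p - d;  // error: the pointee types are not compatible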
+
+// C99 6.5.7
+QualType Sema::CheckShiftOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
+ bool isCompAssign) {
+ // C99 6.5.7p2: Each of the operands shall have integer type.
+ if (!lex->getType()->isIntegerType() || !rex->getType()->isIntegerType())
+ return InvalidOperands(Loc, lex, rex);
+
+ // Shifts don't perform usual arithmetic conversions, they just do integer
+ // promotions on each operand. C99 6.5.7p3
+ QualType LHSTy;
+ if (lex->getType()->isPromotableIntegerType())
+ LHSTy = Context.IntTy;
+ else {
+ LHSTy = isPromotableBitField(lex, Context);
+ if (LHSTy.isNull())
+ LHSTy = lex->getType();
+ }
+ if (!isCompAssign)
+ ImpCastExprToType(lex, LHSTy);
+
+ UsualUnaryConversions(rex);
+
+ // "The type of the result is that of the promoted left operand."
+ return LHSTy;
+}
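+
+// Shift operands only undergo the integer promotions, not the usual
+// arithmetic conversions, so the result follows the promoted left operand
+// (illustrative):
+//   char c; unsigned long ul;
+//   c << ul;   // result type is 'int' (promoted 'char'), not 'unsigned long'
+//   1.0 << 1;  // error: both operands must have integer type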
+
+// C99 6.5.8, C++ [expr.rel]
+QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc,
+ unsigned OpaqueOpc, bool isRelational) {
+ BinaryOperator::Opcode Opc = (BinaryOperator::Opcode)OpaqueOpc;
+
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType())
+ return CheckVectorCompareOperands(lex, rex, Loc, isRelational);
+
+ // C99 6.5.8p3 / C99 6.5.9p4
+ if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType())
+ UsualArithmeticConversions(lex, rex);
+ else {
+ UsualUnaryConversions(lex);
+ UsualUnaryConversions(rex);
+ }
+ QualType lType = lex->getType();
+ QualType rType = rex->getType();
+
+ if (!lType->isFloatingType()
+ && !(lType->isBlockPointerType() && isRelational)) {
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ // NOTE: Don't warn about comparisons of enum constants. These can arise
+ // from macro expansions, and are usually quite deliberate.
+ Expr *LHSStripped = lex->IgnoreParens();
+ Expr *RHSStripped = rex->IgnoreParens();
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped))
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped))
+ if (DRL->getDecl() == DRR->getDecl() &&
+ !isa<EnumConstantDecl>(DRL->getDecl()))
+ Diag(Loc, diag::warn_selfcomparison);
+
+ if (isa<CastExpr>(LHSStripped))
+ LHSStripped = LHSStripped->IgnoreParenCasts();
+ if (isa<CastExpr>(RHSStripped))
+ RHSStripped = RHSStripped->IgnoreParenCasts();
+
+ // Warn about comparisons against a string constant (unless the other
+    // operand is null); the user probably wants strcmp.
+ Expr *literalString = 0;
+ Expr *literalStringStripped = 0;
+ if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) &&
+ !RHSStripped->isNullPointerConstant(Context)) {
+ literalString = lex;
+ literalStringStripped = LHSStripped;
+ }
+ else if ((isa<StringLiteral>(RHSStripped) ||
+ isa<ObjCEncodeExpr>(RHSStripped)) &&
+ !LHSStripped->isNullPointerConstant(Context)) {
+ literalString = rex;
+ literalStringStripped = RHSStripped;
+ }
+
+ if (literalString) {
+ std::string resultComparison;
+ switch (Opc) {
+ case BinaryOperator::LT: resultComparison = ") < 0"; break;
+ case BinaryOperator::GT: resultComparison = ") > 0"; break;
+ case BinaryOperator::LE: resultComparison = ") <= 0"; break;
+ case BinaryOperator::GE: resultComparison = ") >= 0"; break;
+ case BinaryOperator::EQ: resultComparison = ") == 0"; break;
+ case BinaryOperator::NE: resultComparison = ") != 0"; break;
+ default: assert(false && "Invalid comparison operator");
+ }
+ Diag(Loc, diag::warn_stringcompare)
+ << isa<ObjCEncodeExpr>(literalStringStripped)
+ << literalString->getSourceRange()
+ << CodeModificationHint::CreateReplacement(SourceRange(Loc), ", ")
+ << CodeModificationHint::CreateInsertion(lex->getLocStart(),
+ "strcmp(")
+ << CodeModificationHint::CreateInsertion(
+ PP.getLocForEndOfToken(rex->getLocEnd()),
+ resultComparison);
+ }
+ }
+
+ // The result of comparisons is 'bool' in C++, 'int' in C.
+ QualType ResultTy = getLangOptions().CPlusPlus? Context.BoolTy :Context.IntTy;
+
+ if (isRelational) {
+ if (lType->isRealType() && rType->isRealType())
+ return ResultTy;
+ } else {
+ // Check for comparisons of floating point operands using != and ==.
+ if (lType->isFloatingType()) {
+ assert(rType->isFloatingType());
+ CheckFloatComparison(Loc,lex,rex);
+ }
+
+ if (lType->isArithmeticType() && rType->isArithmeticType())
+ return ResultTy;
+ }
+
+ bool LHSIsNull = lex->isNullPointerConstant(Context);
+ bool RHSIsNull = rex->isNullPointerConstant(Context);
+
+ // All of the following pointer related warnings are GCC extensions, except
+ // when handling null pointer constants. One day, we can consider making them
+ // errors (when -pedantic-errors is enabled).
+ if (lType->isPointerType() && rType->isPointerType()) { // C99 6.5.8p2
+ QualType LCanPointeeTy =
+ Context.getCanonicalType(lType->getAsPointerType()->getPointeeType());
+ QualType RCanPointeeTy =
+ Context.getCanonicalType(rType->getAsPointerType()->getPointeeType());
+
+ // Simple check: if the pointee types are identical, we're done.
+ if (LCanPointeeTy == RCanPointeeTy)
+ return ResultTy;
+
+ if (getLangOptions().CPlusPlus) {
+ // C++ [expr.rel]p2:
+ // [...] Pointer conversions (4.10) and qualification
+ // conversions (4.4) are performed on pointer operands (or on
+ // a pointer operand and a null pointer constant) to bring
+ // them to their composite pointer type. [...]
+ //
+ // C++ [expr.eq]p2 uses the same notion for (in)equality
+ // comparisons of pointers.
+ QualType T = FindCompositePointerType(lex, rex);
+ if (T.isNull()) {
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_pointers)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+
+ ImpCastExprToType(lex, T);
+ ImpCastExprToType(rex, T);
+ return ResultTy;
+ }
+
+ if (!LHSIsNull && !RHSIsNull && // C99 6.5.9p2
+ !LCanPointeeTy->isVoidType() && !RCanPointeeTy->isVoidType() &&
+ !Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
+ RCanPointeeTy.getUnqualifiedType()) &&
+ !Context.areComparableObjCPointerTypes(lType, rType)) {
+ Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ }
+ ImpCastExprToType(rex, lType); // promote the pointer to pointer
+ return ResultTy;
+ }
+ // C++ allows comparison of pointers with null pointer constants.
+ if (getLangOptions().CPlusPlus) {
+ if (lType->isPointerType() && RHSIsNull) {
+ ImpCastExprToType(rex, lType);
+ return ResultTy;
+ }
+ if (rType->isPointerType() && LHSIsNull) {
+ ImpCastExprToType(lex, rType);
+ return ResultTy;
+ }
+ // And comparison of nullptr_t with itself.
+ if (lType->isNullPtrType() && rType->isNullPtrType())
+ return ResultTy;
+ }
+ // Handle block pointer types.
+ if (!isRelational && lType->isBlockPointerType() && rType->isBlockPointerType()) {
+ QualType lpointee = lType->getAsBlockPointerType()->getPointeeType();
+ QualType rpointee = rType->getAsBlockPointerType()->getPointeeType();
+
+ if (!LHSIsNull && !RHSIsNull &&
+ !Context.typesAreBlockCompatible(lpointee, rpointee)) {
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ }
+ ImpCastExprToType(rex, lType); // promote the pointer to pointer
+ return ResultTy;
+ }
+ // Allow block pointers to be compared with null pointer constants.
+ if (!isRelational
+ && ((lType->isBlockPointerType() && rType->isPointerType())
+ || (lType->isPointerType() && rType->isBlockPointerType()))) {
+ if (!LHSIsNull && !RHSIsNull) {
+ if (!((rType->isPointerType() && rType->getAsPointerType()
+ ->getPointeeType()->isVoidType())
+ || (lType->isPointerType() && lType->getAsPointerType()
+ ->getPointeeType()->isVoidType())))
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ }
+ ImpCastExprToType(rex, lType); // promote the pointer to pointer
+ return ResultTy;
+ }
+
+ if ((lType->isObjCQualifiedIdType() || rType->isObjCQualifiedIdType())) {
+ if (lType->isPointerType() || rType->isPointerType()) {
+ const PointerType *LPT = lType->getAsPointerType();
+ const PointerType *RPT = rType->getAsPointerType();
+ bool LPtrToVoid = LPT ?
+ Context.getCanonicalType(LPT->getPointeeType())->isVoidType() : false;
+ bool RPtrToVoid = RPT ?
+ Context.getCanonicalType(RPT->getPointeeType())->isVoidType() : false;
+
+ if (!LPtrToVoid && !RPtrToVoid &&
+ !Context.typesAreCompatible(lType, rType)) {
+ Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ ImpCastExprToType(rex, lType);
+ return ResultTy;
+ }
+ ImpCastExprToType(rex, lType);
+ return ResultTy;
+ }
+ if (ObjCQualifiedIdTypesAreCompatible(lType, rType, true)) {
+ ImpCastExprToType(rex, lType);
+ return ResultTy;
+ } else {
+ if ((lType->isObjCQualifiedIdType() && rType->isObjCQualifiedIdType())) {
+ Diag(Loc, diag::warn_incompatible_qualified_id_operands)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ ImpCastExprToType(rex, lType);
+ return ResultTy;
+ }
+ }
+ }
+ if ((lType->isPointerType() || lType->isObjCQualifiedIdType()) &&
+ rType->isIntegerType()) {
+ if (!RHSIsNull)
+ Diag(Loc, diag::ext_typecheck_comparison_of_pointer_integer)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ ImpCastExprToType(rex, lType); // promote the integer to pointer
+ return ResultTy;
+ }
+ if (lType->isIntegerType() &&
+ (rType->isPointerType() || rType->isObjCQualifiedIdType())) {
+ if (!LHSIsNull)
+ Diag(Loc, diag::ext_typecheck_comparison_of_pointer_integer)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ ImpCastExprToType(lex, rType); // promote the integer to pointer
+ return ResultTy;
+ }
+  // Allow block pointers to be compared against null pointer constants of
+  // integer type.
+ if (!isRelational && RHSIsNull
+ && lType->isBlockPointerType() && rType->isIntegerType()) {
+ ImpCastExprToType(rex, lType); // promote the integer to pointer
+ return ResultTy;
+ }
+ if (!isRelational && LHSIsNull
+ && lType->isIntegerType() && rType->isBlockPointerType()) {
+ ImpCastExprToType(lex, rType); // promote the integer to pointer
+ return ResultTy;
+ }
+ return InvalidOperands(Loc, lex, rex);
+}
+
+/// CheckVectorCompareOperands - vector comparisons are a clang extension that
+/// operates on extended vector types. Instead of producing an IntTy result
+/// like a scalar comparison, a vector comparison produces a vector of
+/// integer values.
+QualType Sema::CheckVectorCompareOperands(Expr *&lex, Expr *&rex,
+ SourceLocation Loc,
+ bool isRelational) {
+  // Check to make sure we're operating on vectors of the same type and width,
+  // allowing one side to be a scalar of the element type.
+ QualType vType = CheckVectorOperands(Loc, lex, rex);
+ if (vType.isNull())
+ return vType;
+
+ QualType lType = lex->getType();
+ QualType rType = rex->getType();
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ if (!lType->isFloatingType()) {
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(lex->IgnoreParens()))
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(rex->IgnoreParens()))
+ if (DRL->getDecl() == DRR->getDecl())
+ Diag(Loc, diag::warn_selfcomparison);
+ }
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (!isRelational && lType->isFloatingType()) {
+    assert(rType->isFloatingType());
+    CheckFloatComparison(Loc, lex, rex);
+ }
+
+  // FIXME: Vector compare support in the LLVM backend is not fully reliable,
+  // so just reject all vector comparisons for now.
+ if (1) {
+ Diag(Loc, diag::err_typecheck_vector_comparison)
+ << lType << rType << lex->getSourceRange() << rex->getSourceRange();
+ return QualType();
+ }
+
+ // Return the type for the comparison, which is the same as vector type for
+ // integer vectors, or an integer type of identical size and number of
+ // elements for floating point vectors.
+ if (lType->isIntegerType())
+ return lType;
+
+ const VectorType *VTy = lType->getAsVectorType();
+ unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
+ if (TypeSize == Context.getTypeSize(Context.IntTy))
+ return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
+ if (TypeSize == Context.getTypeSize(Context.LongTy))
+ return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
+
+ assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
+ "Unhandled vector element size in vector compare");
+ return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
+}
+
+inline QualType Sema::CheckBitwiseOperands(
+ Expr *&lex, Expr *&rex, SourceLocation Loc, bool isCompAssign)
+{
+ if (lex->getType()->isVectorType() || rex->getType()->isVectorType())
+ return CheckVectorOperands(Loc, lex, rex);
+
+ QualType compType = UsualArithmeticConversions(lex, rex, isCompAssign);
+
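+  // C99 6.5.[10-12]p2: the bitwise operators require integer operands.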
+ if (lex->getType()->isIntegerType() && rex->getType()->isIntegerType())
+ return compType;
+ return InvalidOperands(Loc, lex, rex);
+}
+
+inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
+ Expr *&lex, Expr *&rex, SourceLocation Loc)
+{
+ UsualUnaryConversions(lex);
+ UsualUnaryConversions(rex);
+
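+  // C99 6.5.[13,14]p2: each operand shall have scalar type. The result has
+  // type int.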
+ if (lex->getType()->isScalarType() && rex->getType()->isScalarType())
+ return Context.IntTy;
+ return InvalidOperands(Loc, lex, rex);
+}
+
+/// IsReadonlyProperty - Verify whether an otherwise valid l-value expression
+/// is a read-only property; return true if so. A readonly property expression
+/// depends on various declarations and thus must be treated specially.
+///
+static bool IsReadonlyProperty(Expr *E, Sema &S)
+{
+ if (E->getStmtClass() == Expr::ObjCPropertyRefExprClass) {
+ const ObjCPropertyRefExpr* PropExpr = cast<ObjCPropertyRefExpr>(E);
+ if (ObjCPropertyDecl *PDecl = PropExpr->getProperty()) {
+ QualType BaseType = PropExpr->getBase()->getType();
+ if (const PointerType *PTy = BaseType->getAsPointerType())
+ if (const ObjCInterfaceType *IFTy =
+ PTy->getPointeeType()->getAsObjCInterfaceType())
+ if (ObjCInterfaceDecl *IFace = IFTy->getDecl())
+ if (S.isPropertyReadonly(PDecl, IFace))
+ return true;
+ }
+ }
+ return false;
+}
+
+/// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not,
+/// emit an error and return true. If so, return false.
+static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
+ SourceLocation OrigLoc = Loc;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context,
+ &Loc);
+ if (IsLV == Expr::MLV_Valid && IsReadonlyProperty(E, S))
+ IsLV = Expr::MLV_ReadonlyProperty;
+ if (IsLV == Expr::MLV_Valid)
+ return false;
+
+ unsigned Diag = 0;
+ bool NeedType = false;
+ switch (IsLV) { // C99 6.5.16p2
+ default: assert(0 && "Unknown result from isModifiableLvalue!");
+ case Expr::MLV_ConstQualified: Diag = diag::err_typecheck_assign_const; break;
+ case Expr::MLV_ArrayType:
+ Diag = diag::err_typecheck_array_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_NotObjectType:
+ Diag = diag::err_typecheck_non_object_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_LValueCast:
+ Diag = diag::err_typecheck_lvalue_casts_not_supported;
+ break;
+ case Expr::MLV_InvalidExpression:
+ Diag = diag::err_typecheck_expression_not_modifiable_lvalue;
+ break;
+ case Expr::MLV_IncompleteType:
+ case Expr::MLV_IncompleteVoidType:
+ return S.RequireCompleteType(Loc, E->getType(),
+ diag::err_typecheck_incomplete_type_not_modifiable_lvalue,
+ E->getSourceRange());
+ case Expr::MLV_DuplicateVectorComponents:
+ Diag = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
+ break;
+ case Expr::MLV_NotBlockQualified:
+ Diag = diag::err_block_decl_ref_not_modifiable_lvalue;
+ break;
+ case Expr::MLV_ReadonlyProperty:
+ Diag = diag::error_readonly_property_assignment;
+ break;
+ case Expr::MLV_NoSetterProperty:
+ Diag = diag::error_nosetter_property_assignment;
+ break;
+ }
+
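+  // If isModifiableLvalue narrowed the location to a subexpression, emit the
+  // diagnostic there and highlight the assignment location separately.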
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ if (NeedType)
+ S.Diag(Loc, Diag) << E->getType() << E->getSourceRange() << Assign;
+ else
+ S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ return true;
+}
+
+
+
+// C99 6.5.16.1
+QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS,
+ SourceLocation Loc,
+ QualType CompoundType) {
+ // Verify that LHS is a modifiable lvalue, and emit error if not.
+ if (CheckForModifiableLvalue(LHS, Loc, *this))
+ return QualType();
+
+ QualType LHSType = LHS->getType();
+ QualType RHSType = CompoundType.isNull() ? RHS->getType() : CompoundType;
+
+ AssignConvertType ConvTy;
+ if (CompoundType.isNull()) {
+ // Simple assignment "x = y".
+ ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS);
+ // Special case of NSObject attributes on c-style pointer types.
+ if (ConvTy == IncompatiblePointer &&
+ ((Context.isObjCNSObjectType(LHSType) &&
+ Context.isObjCObjectPointerType(RHSType)) ||
+ (Context.isObjCNSObjectType(RHSType) &&
+ Context.isObjCObjectPointerType(LHSType))))
+ ConvTy = Compatible;
+
+    // If the RHS is a unary plus or minus, check to see if the = and the +/-
+    // are right next to each other. If so, the user may have typo'd "x =+ 4"
+    // instead of "x += 4".
+ Expr *RHSCheck = RHS;
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck))
+ RHSCheck = ICE->getSubExpr();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) {
+ if ((UO->getOpcode() == UnaryOperator::Plus ||
+ UO->getOpcode() == UnaryOperator::Minus) &&
+ Loc.isFileID() && UO->getOperatorLoc().isFileID() &&
+ // Only if the two operators are exactly adjacent.
+ Loc.getFileLocWithOffset(1) == UO->getOperatorLoc() &&
+ // And there is a space or other character before the subexpr of the
+ // unary +/-. We don't want to warn on "x=-1".
+ Loc.getFileLocWithOffset(2) != UO->getSubExpr()->getLocStart() &&
+ UO->getSubExpr()->getLocStart().isFileID()) {
+ Diag(Loc, diag::warn_not_compound_assign)
+ << (UO->getOpcode() == UnaryOperator::Plus ? "+" : "-")
+ << SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc());
+ }
+ }
+ } else {
+ // Compound assignment "x += y"
+ ConvTy = CheckAssignmentConstraints(LHSType, RHSType);
+ }
+
+ if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType,
+ RHS, "assigning"))
+ return QualType();
+
+ // C99 6.5.16p3: The type of an assignment expression is the type of the
+ // left operand unless the left operand has qualified type, in which case
+ // it is the unqualified version of the type of the left operand.
+ // C99 6.5.16.1p2: In simple assignment, the value of the right operand
+ // is converted to the type of the assignment expression (above).
+ // C++ 5.17p1: the type of the assignment expression is that of its left
+ // operand.
+ return LHSType.getUnqualifiedType();
+}
+
+// C99 6.5.17
+QualType Sema::CheckCommaOperands(Expr *LHS, Expr *&RHS, SourceLocation Loc) {
+ // Comma performs lvalue conversion (C99 6.3.2.1), but not unary conversions.
+ DefaultFunctionArrayConversion(RHS);
+
+ // FIXME: Check that RHS type is complete in C mode (it's legal for it to be
+ // incomplete in C++).
+
+ return RHS->getType();
+}
+
+/// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine
+/// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions.
+QualType Sema::CheckIncrementDecrementOperand(Expr *Op, SourceLocation OpLoc,
+ bool isInc) {
+ if (Op->isTypeDependent())
+ return Context.DependentTy;
+
+ QualType ResType = Op->getType();
+ assert(!ResType.isNull() && "no type for increment/decrement expression");
+
+ if (getLangOptions().CPlusPlus && ResType->isBooleanType()) {
+ // Decrement of bool is not allowed.
+ if (!isInc) {
+ Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
+ return QualType();
+ }
+ // Increment of bool sets it to true, but is deprecated.
+ Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
+ } else if (ResType->isRealType()) {
+ // OK!
+ } else if (const PointerType *PT = ResType->getAsPointerType()) {
+ // C99 6.5.2.4p2, 6.5.6p2
+ if (PT->getPointeeType()->isVoidType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(OpLoc, diag::err_typecheck_pointer_arith_void_type)
+ << Op->getSourceRange();
+ return QualType();
+ }
+
+ // Pointer to void is a GNU extension in C.
+ Diag(OpLoc, diag::ext_gnu_void_ptr) << Op->getSourceRange();
+ } else if (PT->getPointeeType()->isFunctionType()) {
+ if (getLangOptions().CPlusPlus) {
+ Diag(OpLoc, diag::err_typecheck_pointer_arith_function_type)
+ << Op->getType() << Op->getSourceRange();
+ return QualType();
+ }
+
+ Diag(OpLoc, diag::ext_gnu_ptr_func_arith)
+ << ResType << Op->getSourceRange();
+ } else if (RequireCompleteType(OpLoc, PT->getPointeeType(),
+ diag::err_typecheck_arithmetic_incomplete_type,
+ Op->getSourceRange(), SourceRange(),
+ ResType))
+ return QualType();
+ } else if (ResType->isComplexType()) {
+    // C99 does not support ++/-- on complex types; we allow it as an extension.
+ Diag(OpLoc, diag::ext_integer_increment_complex)
+ << ResType << Op->getSourceRange();
+ } else {
+ Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
+ << ResType << Op->getSourceRange();
+ return QualType();
+ }
+ // At this point, we know we have a real, complex or pointer type.
+ // Now make sure the operand is a modifiable lvalue.
+ if (CheckForModifiableLvalue(Op, OpLoc, *this))
+ return QualType();
+ return ResType;
+}
+
+/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
+/// This routine allows us to typecheck complex/recursive expressions
+/// where the declaration is needed for type checking. We only need to
+/// handle cases when the expression references a function designator
+/// or is an lvalue. Here are some examples:
+/// - &(x) => x
+/// - &*****f => f for f a function designator.
+/// - &s.xx => s
+/// - &s.zz[1].yy -> s, if zz is an array
+/// - *(x + 1) -> x, if x is an array
+/// - &"123"[2] -> 0
+/// - & __real__ x -> x
+static NamedDecl *getPrimaryDecl(Expr *E) {
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ case Stmt::QualifiedDeclRefExprClass:
+ return cast<DeclRefExpr>(E)->getDecl();
+ case Stmt::MemberExprClass:
+ // If this is an arrow operator, the address is an offset from
+ // the base's value, so the object the base refers to is
+ // irrelevant.
+ if (cast<MemberExpr>(E)->isArrow())
+ return 0;
+ // Otherwise, the expression refers to a part of the base
+ return getPrimaryDecl(cast<MemberExpr>(E)->getBase());
+ case Stmt::ArraySubscriptExprClass: {
+ // FIXME: This code shouldn't be necessary! We should catch the implicit
+ // promotion of register arrays earlier.
+ Expr* Base = cast<ArraySubscriptExpr>(E)->getBase();
+ if (ImplicitCastExpr* ICE = dyn_cast<ImplicitCastExpr>(Base)) {
+ if (ICE->getSubExpr()->getType()->isArrayType())
+ return getPrimaryDecl(ICE->getSubExpr());
+ }
+ return 0;
+ }
+ case Stmt::UnaryOperatorClass: {
+ UnaryOperator *UO = cast<UnaryOperator>(E);
+
+ switch(UO->getOpcode()) {
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ case UnaryOperator::Extension:
+ return getPrimaryDecl(UO->getSubExpr());
+ default:
+ return 0;
+ }
+ }
+ case Stmt::ParenExprClass:
+ return getPrimaryDecl(cast<ParenExpr>(E)->getSubExpr());
+ case Stmt::ImplicitCastExprClass:
+ // If the result of an implicit cast is an l-value, we care about
+ // the sub-expression; otherwise, the result here doesn't matter.
+ return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr());
+ default:
+ return 0;
+ }
+}
+
+/// CheckAddressOfOperand - The operand of & must be either a function
+/// designator or an lvalue designating an object. If it is an lvalue, the
+/// object cannot be declared with storage class register or be a bit field.
+/// Note: The usual conversions are *not* applied to the operand of the &
+/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
+/// In C++, the operand might be an overloaded function name, in which case
+/// we allow the '&' but retain the overloaded-function type.
+QualType Sema::CheckAddressOfOperand(Expr *op, SourceLocation OpLoc) {
+ // Make sure to ignore parentheses in subsequent checks
+ op = op->IgnoreParens();
+
+ if (op->isTypeDependent())
+ return Context.DependentTy;
+
+ if (getLangOptions().C99) {
+ // Implement C99-only parts of addressof rules.
+ if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
+ if (uOp->getOpcode() == UnaryOperator::Deref)
+ // Per C99 6.5.3.2, the address of a deref always returns a valid result
+ // (assuming the deref expression is valid).
+ return uOp->getSubExpr()->getType();
+ }
+ // Technically, there should be a check for array subscript
+ // expressions here, but the result of one is always an lvalue anyway.
+ }
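+  // Determine which declaration (if any) the operand ultimately refers to;
+  // this drives the register, bit-field, and member-pointer checks below.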
+ NamedDecl *dcl = getPrimaryDecl(op);
+ Expr::isLvalueResult lval = op->isLvalue(Context);
+
+ if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
+ // C99 6.5.3.2p1
+ // The operand must be either an l-value or a function designator
+ if (!op->getType()->isFunctionType()) {
+ // FIXME: emit more specific diag...
+ Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << op->getSourceRange();
+ return QualType();
+ }
+ } else if (op->getBitField()) { // C99 6.5.3.2p1
+ // The operand cannot be a bit-field
+ Diag(OpLoc, diag::err_typecheck_address_of)
+ << "bit-field" << op->getSourceRange();
+ return QualType();
+ } else if (isa<ExtVectorElementExpr>(op) || (isa<ArraySubscriptExpr>(op) &&
+ cast<ArraySubscriptExpr>(op)->getBase()->getType()->isVectorType())){
+ // The operand cannot be an element of a vector
+ Diag(OpLoc, diag::err_typecheck_address_of)
+ << "vector element" << op->getSourceRange();
+ return QualType();
+ } else if (dcl) { // C99 6.5.3.2p1
+ // We have an lvalue with a decl. Make sure the decl is not declared
+ // with the register storage-class specifier.
+ if (const VarDecl *vd = dyn_cast<VarDecl>(dcl)) {
+ if (vd->getStorageClass() == VarDecl::Register) {
+ Diag(OpLoc, diag::err_typecheck_address_of)
+ << "register variable" << op->getSourceRange();
+ return QualType();
+ }
+ } else if (isa<OverloadedFunctionDecl>(dcl)) {
+ return Context.OverloadTy;
+ } else if (isa<FieldDecl>(dcl)) {
+ // Okay: we can take the address of a field.
+ // Could be a pointer to member, though, if there is an explicit
+ // scope qualifier for the class.
+ if (isa<QualifiedDeclRefExpr>(op)) {
+ DeclContext *Ctx = dcl->getDeclContext();
+ if (Ctx && Ctx->isRecord())
+ return Context.getMemberPointerType(op->getType(),
+ Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+ }
+ } else if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(dcl)) {
+ // Okay: we can take the address of a function.
+ // As above.
+ if (isa<QualifiedDeclRefExpr>(op) && MD->isInstance())
+ return Context.getMemberPointerType(op->getType(),
+ Context.getTypeDeclType(MD->getParent()).getTypePtr());
+ } else if (!isa<FunctionDecl>(dcl))
+ assert(0 && "Unknown/unexpected decl type");
+ }
+
+ if (lval == Expr::LV_IncompleteVoidType) {
+ // Taking the address of a void variable is technically illegal, but we
+ // allow it in cases which are otherwise valid.
+ // Example: "extern void x; void* y = &x;".
+ Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange();
+ }
+
+ // If the operand has type "type", the result has type "pointer to type".
+ return Context.getPointerType(op->getType());
+}
+
+QualType Sema::CheckIndirectionOperand(Expr *Op, SourceLocation OpLoc) {
+ if (Op->isTypeDependent())
+ return Context.DependentTy;
+
+ UsualUnaryConversions(Op);
+ QualType Ty = Op->getType();
+
+  // Note that per both C89 and C99, indirection is always legal here, even if
+  // the pointee type is incomplete or void. It would be possible to warn about
+  // dereferencing a void pointer, but it's completely well-defined, and such a
+  // warning is unlikely to catch any mistakes.
+ if (const PointerType *PT = Ty->getAsPointerType())
+ return PT->getPointeeType();
+
+ Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
+ << Ty << Op->getSourceRange();
+ return QualType();
+}
+
+static inline BinaryOperator::Opcode ConvertTokenKindToBinaryOpcode(
+ tok::TokenKind Kind) {
+ BinaryOperator::Opcode Opc;
+ switch (Kind) {
+ default: assert(0 && "Unknown binop!");
+ case tok::periodstar: Opc = BinaryOperator::PtrMemD; break;
+ case tok::arrowstar: Opc = BinaryOperator::PtrMemI; break;
+ case tok::star: Opc = BinaryOperator::Mul; break;
+ case tok::slash: Opc = BinaryOperator::Div; break;
+ case tok::percent: Opc = BinaryOperator::Rem; break;
+ case tok::plus: Opc = BinaryOperator::Add; break;
+ case tok::minus: Opc = BinaryOperator::Sub; break;
+ case tok::lessless: Opc = BinaryOperator::Shl; break;
+ case tok::greatergreater: Opc = BinaryOperator::Shr; break;
+ case tok::lessequal: Opc = BinaryOperator::LE; break;
+ case tok::less: Opc = BinaryOperator::LT; break;
+ case tok::greaterequal: Opc = BinaryOperator::GE; break;
+ case tok::greater: Opc = BinaryOperator::GT; break;
+ case tok::exclaimequal: Opc = BinaryOperator::NE; break;
+ case tok::equalequal: Opc = BinaryOperator::EQ; break;
+ case tok::amp: Opc = BinaryOperator::And; break;
+ case tok::caret: Opc = BinaryOperator::Xor; break;
+ case tok::pipe: Opc = BinaryOperator::Or; break;
+ case tok::ampamp: Opc = BinaryOperator::LAnd; break;
+ case tok::pipepipe: Opc = BinaryOperator::LOr; break;
+ case tok::equal: Opc = BinaryOperator::Assign; break;
+ case tok::starequal: Opc = BinaryOperator::MulAssign; break;
+ case tok::slashequal: Opc = BinaryOperator::DivAssign; break;
+ case tok::percentequal: Opc = BinaryOperator::RemAssign; break;
+ case tok::plusequal: Opc = BinaryOperator::AddAssign; break;
+ case tok::minusequal: Opc = BinaryOperator::SubAssign; break;
+ case tok::lesslessequal: Opc = BinaryOperator::ShlAssign; break;
+ case tok::greatergreaterequal: Opc = BinaryOperator::ShrAssign; break;
+ case tok::ampequal: Opc = BinaryOperator::AndAssign; break;
+ case tok::caretequal: Opc = BinaryOperator::XorAssign; break;
+ case tok::pipeequal: Opc = BinaryOperator::OrAssign; break;
+ case tok::comma: Opc = BinaryOperator::Comma; break;
+ }
+ return Opc;
+}
+
+static inline UnaryOperator::Opcode ConvertTokenKindToUnaryOpcode(
+ tok::TokenKind Kind) {
+ UnaryOperator::Opcode Opc;
+ switch (Kind) {
+ default: assert(0 && "Unknown unary op!");
+ case tok::plusplus: Opc = UnaryOperator::PreInc; break;
+ case tok::minusminus: Opc = UnaryOperator::PreDec; break;
+ case tok::amp: Opc = UnaryOperator::AddrOf; break;
+ case tok::star: Opc = UnaryOperator::Deref; break;
+ case tok::plus: Opc = UnaryOperator::Plus; break;
+ case tok::minus: Opc = UnaryOperator::Minus; break;
+ case tok::tilde: Opc = UnaryOperator::Not; break;
+ case tok::exclaim: Opc = UnaryOperator::LNot; break;
+ case tok::kw___real: Opc = UnaryOperator::Real; break;
+ case tok::kw___imag: Opc = UnaryOperator::Imag; break;
+ case tok::kw___extension__: Opc = UnaryOperator::Extension; break;
+ }
+ return Opc;
+}
+
+/// CreateBuiltinBinOp - Creates a new built-in binary operation with
+/// operator @p Opc at location @c OpLoc. This routine only supports
+/// built-in operations; ActOnBinOp handles overloaded operators.
+Action::OwningExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
+ unsigned Op,
+ Expr *lhs, Expr *rhs) {
+ QualType ResultTy; // Result type of the binary operator.
+ BinaryOperator::Opcode Opc = (BinaryOperator::Opcode)Op;
+ // The following two variables are used for compound assignment operators
+ QualType CompLHSTy; // Type of LHS after promotions for computation
+ QualType CompResultTy; // Type of computation result
+
+ switch (Opc) {
+ case BinaryOperator::Assign:
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, QualType());
+ break;
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ ResultTy = CheckPointerToMemberOperands(lhs, rhs, OpLoc,
+ Opc == BinaryOperator::PtrMemI);
+ break;
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ ResultTy = CheckMultiplyDivideOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::Rem:
+ ResultTy = CheckRemainderOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::Add:
+ ResultTy = CheckAdditionOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::Sub:
+ ResultTy = CheckSubtractionOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ ResultTy = CheckShiftOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::LE:
+ case BinaryOperator::LT:
+ case BinaryOperator::GE:
+ case BinaryOperator::GT:
+ ResultTy = CheckCompareOperands(lhs, rhs, OpLoc, Opc, true);
+ break;
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ ResultTy = CheckCompareOperands(lhs, rhs, OpLoc, Opc, false);
+ break;
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ ResultTy = CheckBitwiseOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ ResultTy = CheckLogicalOperands(lhs, rhs, OpLoc);
+ break;
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ CompResultTy = CheckMultiplyDivideOperands(lhs, rhs, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::RemAssign:
+ CompResultTy = CheckRemainderOperands(lhs, rhs, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::AddAssign:
+ CompResultTy = CheckAdditionOperands(lhs, rhs, OpLoc, &CompLHSTy);
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::SubAssign:
+ CompResultTy = CheckSubtractionOperands(lhs, rhs, OpLoc, &CompLHSTy);
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ CompResultTy = CheckShiftOperands(lhs, rhs, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::OrAssign:
+ CompResultTy = CheckBitwiseOperands(lhs, rhs, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull())
+ ResultTy = CheckAssignmentOperands(lhs, rhs, OpLoc, CompResultTy);
+ break;
+ case BinaryOperator::Comma:
+ ResultTy = CheckCommaOperands(lhs, rhs, OpLoc);
+ break;
+ }
+ if (ResultTy.isNull())
+ return ExprError();
+ if (CompResultTy.isNull())
+ return Owned(new (Context) BinaryOperator(lhs, rhs, Opc, ResultTy, OpLoc));
+ else
+ return Owned(new (Context) CompoundAssignOperator(lhs, rhs, Opc, ResultTy,
+ CompLHSTy, CompResultTy,
+ OpLoc));
+}
+
+// Binary Operators. 'Kind' is the kind of the operator token.
+Action::OwningExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind,
+ ExprArg LHS, ExprArg RHS) {
+ BinaryOperator::Opcode Opc = ConvertTokenKindToBinaryOpcode(Kind);
+ Expr *lhs = LHS.takeAs<Expr>(), *rhs = RHS.takeAs<Expr>();
+
+ assert((lhs != 0) && "ActOnBinOp(): missing left expression");
+ assert((rhs != 0) && "ActOnBinOp(): missing right expression");
+
+ if (getLangOptions().CPlusPlus &&
+ (lhs->getType()->isOverloadableType() ||
+ rhs->getType()->isOverloadableType())) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ FunctionSet Functions;
+ OverloadedOperatorKind OverOp = BinaryOperator::getOverloadedOperator(Opc);
+ if (OverOp != OO_None) {
+ LookupOverloadedOperatorName(OverOp, S, lhs->getType(), rhs->getType(),
+ Functions);
+ Expr *Args[2] = { lhs, rhs };
+ DeclarationName OpName
+ = Context.DeclarationNames.getCXXOperatorName(OverOp);
+ ArgumentDependentLookup(OpName, Args, 2, Functions);
+ }
+
+ // Build the (potentially-overloaded, potentially-dependent)
+ // binary operation.
+ return CreateOverloadedBinOp(TokLoc, Opc, Functions, lhs, rhs);
+ }
+
+ // Build a built-in binary operation.
+ return CreateBuiltinBinOp(TokLoc, Opc, lhs, rhs);
+}
+
+Action::OwningExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
+ unsigned OpcIn,
+ ExprArg InputArg) {
+ UnaryOperator::Opcode Opc = static_cast<UnaryOperator::Opcode>(OpcIn);
+
+ // FIXME: Input is modified below, but InputArg is not updated appropriately.
+ Expr *Input = (Expr *)InputArg.get();
+ QualType resultType;
+ switch (Opc) {
+ case UnaryOperator::PostInc:
+ case UnaryOperator::PostDec:
+ case UnaryOperator::OffsetOf:
+ assert(false && "Invalid unary operator");
+ break;
+
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec:
+ resultType = CheckIncrementDecrementOperand(Input, OpLoc,
+ Opc == UnaryOperator::PreInc);
+ break;
+ case UnaryOperator::AddrOf:
+ resultType = CheckAddressOfOperand(Input, OpLoc);
+ break;
+ case UnaryOperator::Deref:
+ DefaultFunctionArrayConversion(Input);
+ resultType = CheckIndirectionOperand(Input, OpLoc);
+ break;
+ case UnaryOperator::Plus:
+ case UnaryOperator::Minus:
+ UsualUnaryConversions(Input);
+ resultType = Input->getType();
+ if (resultType->isDependentType())
+ break;
+ if (resultType->isArithmeticType()) // C99 6.5.3.3p1
+ break;
+ else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6-7
+ resultType->isEnumeralType())
+ break;
+ else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6
+ Opc == UnaryOperator::Plus &&
+ resultType->isPointerType())
+ break;
+
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input->getSourceRange());
+ case UnaryOperator::Not: // bitwise complement
+ UsualUnaryConversions(Input);
+ resultType = Input->getType();
+ if (resultType->isDependentType())
+ break;
+ // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
+ if (resultType->isComplexType() || resultType->isComplexIntegerType())
+ // C99 does not support '~' for complex conjugation.
+ Diag(OpLoc, diag::ext_integer_complement_complex)
+ << resultType << Input->getSourceRange();
+ else if (!resultType->isIntegerType())
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input->getSourceRange());
+ break;
+ case UnaryOperator::LNot: // logical negation
+ // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
+ DefaultFunctionArrayConversion(Input);
+ resultType = Input->getType();
+ if (resultType->isDependentType())
+ break;
+ if (!resultType->isScalarType()) // C99 6.5.3.3p1
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input->getSourceRange());
+ // LNot always has type int. C99 6.5.3.3p5.
+ // In C++, it's bool. C++ 5.3.1p8
+ resultType = getLangOptions().CPlusPlus ? Context.BoolTy : Context.IntTy;
+ break;
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ resultType = CheckRealImagOperand(Input, OpLoc, Opc == UnaryOperator::Real);
+ break;
+ case UnaryOperator::Extension:
+ resultType = Input->getType();
+ break;
+ }
+ if (resultType.isNull())
+ return ExprError();
+
+ InputArg.release();
+ return Owned(new (Context) UnaryOperator(Input, Opc, resultType, OpLoc));
+}
+
+// Unary Operators. 'Op' is the kind of the operator token.
+Action::OwningExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, ExprArg input) {
+ Expr *Input = (Expr*)input.get();
+ UnaryOperator::Opcode Opc = ConvertTokenKindToUnaryOpcode(Op);
+
+ if (getLangOptions().CPlusPlus && Input->getType()->isOverloadableType()) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ FunctionSet Functions;
+ OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc);
+ if (OverOp != OO_None) {
+ LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(),
+ Functions);
+ DeclarationName OpName
+ = Context.DeclarationNames.getCXXOperatorName(OverOp);
+ ArgumentDependentLookup(OpName, &Input, 1, Functions);
+ }
+
+ return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, move(input));
+ }
+
+ return CreateBuiltinUnaryOp(OpLoc, Opc, move(input));
+}
+
+/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
+Sema::OwningExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc,
+ SourceLocation LabLoc,
+ IdentifierInfo *LabelII) {
+ // Look up the record for this label identifier.
+ LabelStmt *&LabelDecl = getLabelMap()[LabelII];
+
+ // If we haven't seen this label yet, create a forward reference. It
+ // will be validated and/or cleaned up in ActOnFinishFunctionBody.
+ if (LabelDecl == 0)
+ LabelDecl = new (Context) LabelStmt(LabLoc, LabelII, 0);
+
+ // Create the AST node. The address of a label always has type 'void*'.
+ return Owned(new (Context) AddrLabelExpr(OpLoc, LabLoc, LabelDecl,
+ Context.getPointerType(Context.VoidTy)));
+}
+
+Sema::OwningExprResult
+Sema::ActOnStmtExpr(SourceLocation LPLoc, StmtArg substmt,
+ SourceLocation RPLoc) { // "({..})"
+ Stmt *SubStmt = static_cast<Stmt*>(substmt.get());
+ assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!");
+ CompoundStmt *Compound = cast<CompoundStmt>(SubStmt);
+
+ bool isFileScope = getCurFunctionOrMethodDecl() == 0;
+ if (isFileScope)
+ return ExprError(Diag(LPLoc, diag::err_stmtexpr_file_scope));
+
+ // FIXME: there are a variety of strange constraints to enforce here, for
+ // example, it is not possible to goto into a stmt expression apparently.
+ // More semantic analysis is needed.
+
+ // If there are sub stmts in the compound stmt, take the type of the last one
+ // as the type of the stmtexpr.
+ QualType Ty = Context.VoidTy;
+
+ if (!Compound->body_empty()) {
+ Stmt *LastStmt = Compound->body_back();
+ // If LastStmt is a label, skip down through into the body.
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt))
+ LastStmt = Label->getSubStmt();
+
+ if (Expr *LastExpr = dyn_cast<Expr>(LastStmt))
+ Ty = LastExpr->getType();
+ }
+
+ // FIXME: Check that expression type is complete/non-abstract; statement
+ // expressions are not lvalues.
+
+ substmt.release();
+ return Owned(new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc));
+}
+
+Sema::OwningExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ TypeTy *argty,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RPLoc) {
+ // FIXME: This function leaks all expressions in the offset components on
+ // error.
+ QualType ArgTy = QualType::getFromOpaquePtr(argty);
+ assert(!ArgTy.isNull() && "Missing type argument!");
+
+ bool Dependent = ArgTy->isDependentType();
+
+ // We must have at least one component that refers to the type, and the first
+ // one is known to be a field designator. Verify that the ArgTy represents
+ // a struct/union/class.
+ if (!Dependent && !ArgTy->isRecordType())
+ return ExprError(Diag(TypeLoc, diag::err_offsetof_record_type) << ArgTy);
+
+  // FIXME: The type must be complete per C99 7.17p3, because declaring a
+  // variable with an incomplete type would be illegal.
+
+ // Otherwise, create a null pointer as the base, and iteratively process
+ // the offsetof designators.
+ QualType ArgTyPtr = Context.getPointerType(ArgTy);
+ Expr* Res = new (Context) ImplicitValueInitExpr(ArgTyPtr);
+ Res = new (Context) UnaryOperator(Res, UnaryOperator::Deref,
+ ArgTy, SourceLocation());
+
+  // offsetof with non-identifier designators (e.g. "offsetof(x, a.b[c])") is a
+  // GCC extension; diagnose it.
+ // FIXME: This diagnostic isn't actually visible because the location is in
+ // a system header!
+ if (NumComponents != 1)
+ Diag(BuiltinLoc, diag::ext_offsetof_extended_field_designator)
+ << SourceRange(CompPtr[1].LocStart, CompPtr[NumComponents-1].LocEnd);
+
+ if (!Dependent) {
+ bool DidWarnAboutNonPOD = false;
+
+ // FIXME: Dependent case loses a lot of information here. And probably
+ // leaks like a sieve.
+ for (unsigned i = 0; i != NumComponents; ++i) {
+ const OffsetOfComponent &OC = CompPtr[i];
+ if (OC.isBrackets) {
+ // Offset of an array sub-field. TODO: Should we allow vector elements?
+ const ArrayType *AT = Context.getAsArrayType(Res->getType());
+ if (!AT) {
+ Res->Destroy(Context);
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_array_type)
+ << Res->getType());
+ }
+
+ // FIXME: C++: Verify that operator[] isn't overloaded.
+
+ // Promote the array so it looks more like a normal array subscript
+ // expression.
+ DefaultFunctionArrayConversion(Res);
+
+ // C99 6.5.2.1p1
+ Expr *Idx = static_cast<Expr*>(OC.U.E);
+ // FIXME: Leaks Res
+ if (!Idx->isTypeDependent() && !Idx->getType()->isIntegerType())
+ return ExprError(Diag(Idx->getLocStart(),
+ diag::err_typecheck_subscript_not_integer)
+ << Idx->getSourceRange());
+
+ Res = new (Context) ArraySubscriptExpr(Res, Idx, AT->getElementType(),
+ OC.LocEnd);
+ continue;
+ }
+
+ const RecordType *RC = Res->getType()->getAsRecordType();
+ if (!RC) {
+ Res->Destroy(Context);
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type)
+ << Res->getType());
+ }
+
+ // Get the decl corresponding to this.
+ RecordDecl *RD = RC->getDecl();
+ if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CRD->isPOD() && !DidWarnAboutNonPOD) {
+ ExprError(Diag(BuiltinLoc, diag::warn_offsetof_non_pod_type)
+ << SourceRange(CompPtr[0].LocStart, OC.LocEnd)
+ << Res->getType());
+ DidWarnAboutNonPOD = true;
+ }
+ }
+
+ FieldDecl *MemberDecl
+ = dyn_cast_or_null<FieldDecl>(LookupQualifiedName(RD, OC.U.IdentInfo,
+ LookupMemberName)
+ .getAsDecl());
+ // FIXME: Leaks Res
+ if (!MemberDecl)
+ return ExprError(Diag(BuiltinLoc, diag::err_typecheck_no_member)
+ << OC.U.IdentInfo << SourceRange(OC.LocStart, OC.LocEnd));
+
+ // FIXME: C++: Verify that MemberDecl isn't a static field.
+ // FIXME: Verify that MemberDecl isn't a bitfield.
+ if (cast<RecordDecl>(MemberDecl->getDeclContext())->isAnonymousStructOrUnion()) {
+ Res = BuildAnonymousStructUnionMemberReference(
+ SourceLocation(), MemberDecl, Res, SourceLocation()).takeAs<Expr>();
+ } else {
+ // MemberDecl->getType() doesn't get the right qualifiers, but it
+ // doesn't matter here.
+ Res = new (Context) MemberExpr(Res, false, MemberDecl, OC.LocEnd,
+ MemberDecl->getType().getNonReferenceType());
+ }
+ }
+ }
+
+ return Owned(new (Context) UnaryOperator(Res, UnaryOperator::OffsetOf,
+ Context.getSizeType(), BuiltinLoc));
+}
+
+
+Sema::OwningExprResult Sema::ActOnTypesCompatibleExpr(SourceLocation BuiltinLoc,
+ TypeTy *arg1,TypeTy *arg2,
+ SourceLocation RPLoc) {
+ QualType argT1 = QualType::getFromOpaquePtr(arg1);
+ QualType argT2 = QualType::getFromOpaquePtr(arg2);
+
+ assert((!argT1.isNull() && !argT2.isNull()) && "Missing type argument(s)");
+
+ if (getLangOptions().CPlusPlus) {
+ Diag(BuiltinLoc, diag::err_types_compatible_p_in_cplusplus)
+ << SourceRange(BuiltinLoc, RPLoc);
+ return ExprError();
+ }
+
+ return Owned(new (Context) TypesCompatibleExpr(Context.IntTy, BuiltinLoc,
+ argT1, argT2, RPLoc));
+}
+
+Sema::OwningExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
+ ExprArg cond,
+ ExprArg expr1, ExprArg expr2,
+ SourceLocation RPLoc) {
+ Expr *CondExpr = static_cast<Expr*>(cond.get());
+ Expr *LHSExpr = static_cast<Expr*>(expr1.get());
+ Expr *RHSExpr = static_cast<Expr*>(expr2.get());
+
+ assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");
+
+ QualType resType;
+ if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
+ resType = Context.DependentTy;
+ } else {
+ // The conditional expression is required to be a constant expression.
+ llvm::APSInt condEval(32);
+ SourceLocation ExpLoc;
+ if (!CondExpr->isIntegerConstantExpr(condEval, Context, &ExpLoc))
+ return ExprError(Diag(ExpLoc,
+ diag::err_typecheck_choose_expr_requires_constant)
+ << CondExpr->getSourceRange());
+
+    // If the condition is non-zero, the expression's type is that of LHSExpr;
+    // otherwise it is that of RHSExpr.
+ resType = condEval.getZExtValue() ? LHSExpr->getType() : RHSExpr->getType();
+ }
+
+ cond.release(); expr1.release(); expr2.release();
+ return Owned(new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
+ resType, RPLoc));
+}
+
+//===----------------------------------------------------------------------===//
+// Clang Extensions.
+//===----------------------------------------------------------------------===//
+
+/// ActOnBlockStart - This callback is invoked when a block literal is started.
+void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *BlockScope) {
+  // Set up the semantic information for the new block.
+  BlockSemaInfo *BSI = new BlockSemaInfo();
+
+  // Make BSI the current block, saving the enclosing block (if any).
+  BSI->PrevBlockInfo = CurBlock;
+  CurBlock = BSI;
+
+ BSI->ReturnType = 0;
+ BSI->TheScope = BlockScope;
+ BSI->hasBlockDeclRefExprs = false;
+ BSI->SavedFunctionNeedsScopeChecking = CurFunctionNeedsScopeChecking;
+ CurFunctionNeedsScopeChecking = false;
+
+ BSI->TheDecl = BlockDecl::Create(Context, CurContext, CaretLoc);
+ PushDeclContext(BlockScope, BSI->TheDecl);
+}
+
+void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
+ assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!");
+
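+  // If the declarator has no top-level function chunk, the block was written
+  // without an explicit parameter list; the declarator then only supplies the
+  // return type.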
+ if (ParamInfo.getNumTypeObjects() == 0
+ || ParamInfo.getTypeObject(0).Kind != DeclaratorChunk::Function) {
+ ProcessDeclAttributes(CurBlock->TheDecl, ParamInfo);
+ QualType T = GetTypeForDeclarator(ParamInfo, CurScope);
+
+ if (T->isArrayType()) {
+ Diag(ParamInfo.getSourceRange().getBegin(),
+ diag::err_block_returns_array);
+ return;
+ }
+
+    // The parameter list is optional; if there was none, assume ().
+ if (!T->isFunctionType())
+ T = Context.getFunctionType(T, NULL, 0, 0, 0);
+
+ CurBlock->hasPrototype = true;
+ CurBlock->isVariadic = false;
+ // Check for a valid sentinel attribute on this block.
+ if (CurBlock->TheDecl->getAttr<SentinelAttr>()) {
+ Diag(ParamInfo.getAttributes()->getLoc(),
+ diag::warn_attribute_sentinel_not_variadic) << 1;
+ // FIXME: remove the attribute.
+ }
+ QualType RetTy = T.getTypePtr()->getAsFunctionType()->getResultType();
+
+    // Do not allow returning an Objective-C interface by value.
+ if (RetTy->isObjCInterfaceType()) {
+ Diag(ParamInfo.getSourceRange().getBegin(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
+ return;
+ }
+ return;
+ }
+
+ // Analyze arguments to block.
+ assert(ParamInfo.getTypeObject(0).Kind == DeclaratorChunk::Function &&
+ "Not a function declarator!");
+ DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getTypeObject(0).Fun;
+
+ CurBlock->hasPrototype = FTI.hasPrototype;
+ CurBlock->isVariadic = true;
+
+ // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs function that takes
+ // no arguments, not a function that takes a single void argument.
+ if (FTI.hasPrototype &&
+ FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ (!FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType().getCVRQualifiers()&&
+ FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType()->isVoidType())) {
+ // empty arg list, don't push any params.
+ CurBlock->isVariadic = false;
+ } else if (FTI.hasPrototype) {
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i)
+ CurBlock->Params.push_back(FTI.ArgInfo[i].Param.getAs<ParmVarDecl>());
+ CurBlock->isVariadic = FTI.isVariadic;
+ }
+ CurBlock->TheDecl->setParams(Context, CurBlock->Params.data(),
+ CurBlock->Params.size());
+ CurBlock->TheDecl->setIsVariadic(CurBlock->isVariadic);
+ ProcessDeclAttributes(CurBlock->TheDecl, ParamInfo);
+ for (BlockDecl::param_iterator AI = CurBlock->TheDecl->param_begin(),
+ E = CurBlock->TheDecl->param_end(); AI != E; ++AI)
+ // If this has an identifier, add it to the scope stack.
+ if ((*AI)->getIdentifier())
+ PushOnScopeChains(*AI, CurBlock->TheScope);
+
+ // Check for a valid sentinel attribute on this block.
+ if (!CurBlock->isVariadic && CurBlock->TheDecl->getAttr<SentinelAttr>()) {
+ Diag(ParamInfo.getAttributes()->getLoc(),
+ diag::warn_attribute_sentinel_not_variadic) << 1;
+ // FIXME: remove the attribute.
+ }
+
+ // Analyze the return type.
+ QualType T = GetTypeForDeclarator(ParamInfo, CurScope);
+ QualType RetTy = T->getAsFunctionType()->getResultType();
+
+  // Do not allow returning an Objective-C interface by value.
+ if (RetTy->isObjCInterfaceType()) {
+ Diag(ParamInfo.getSourceRange().getBegin(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
+ } else if (!RetTy->isDependentType())
+ CurBlock->ReturnType = RetTy.getTypePtr();
+}
+
+/// ActOnBlockError - If there is an error parsing a block, this callback
+/// is invoked to pop the information about the block from the action impl.
+void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
+ // Ensure that CurBlock is deleted.
+ llvm::OwningPtr<BlockSemaInfo> CC(CurBlock);
+
+ CurFunctionNeedsScopeChecking = CurBlock->SavedFunctionNeedsScopeChecking;
+
+ // Pop off CurBlock, handle nested blocks.
+ PopDeclContext();
+ CurBlock = CurBlock->PrevBlockInfo;
+ // FIXME: Delete the ParmVarDecl objects as well???
+}
+
+/// ActOnBlockStmtExpr - This is called when the body of a block literal
+/// was successfully completed, e.g. ^(int x){...}.
+Sema::OwningExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
+ StmtArg body, Scope *CurScope) {
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ Diag(CaretLoc, diag::err_blocks_disable);
+
+ // Ensure that CurBlock is deleted.
+ llvm::OwningPtr<BlockSemaInfo> BSI(CurBlock);
+
+ PopDeclContext();
+
+ // Pop off CurBlock, handle nested blocks.
+ CurBlock = CurBlock->PrevBlockInfo;
+
+ QualType RetTy = Context.VoidTy;
+ if (BSI->ReturnType)
+ RetTy = QualType(BSI->ReturnType, 0);
+
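+  // Collect the parameter types and build the block's function type; without
+  // a prototype, fall back to a no-prototype function type.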
+ llvm::SmallVector<QualType, 8> ArgTypes;
+ for (unsigned i = 0, e = BSI->Params.size(); i != e; ++i)
+ ArgTypes.push_back(BSI->Params[i]->getType());
+
+ QualType BlockTy;
+ if (!BSI->hasPrototype)
+ BlockTy = Context.getFunctionNoProtoType(RetTy);
+ else
+ BlockTy = Context.getFunctionType(RetTy, ArgTypes.data(), ArgTypes.size(),
+ BSI->isVariadic, 0);
+
+ // FIXME: Check that return/parameter types are complete/non-abstract
+
+ BlockTy = Context.getBlockPointerType(BlockTy);
+
+ // If needed, diagnose invalid gotos and switches in the block.
+ if (CurFunctionNeedsScopeChecking)
+ DiagnoseInvalidJumps(static_cast<CompoundStmt*>(body.get()));
+ CurFunctionNeedsScopeChecking = BSI->SavedFunctionNeedsScopeChecking;
+
+ BSI->TheDecl->setBody(body.takeAs<CompoundStmt>());
+ return Owned(new (Context) BlockExpr(BSI->TheDecl, BlockTy,
+ BSI->hasBlockDeclRefExprs));
+}
+
+Sema::OwningExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc,
+ ExprArg expr, TypeTy *type,
+ SourceLocation RPLoc) {
+ QualType T = QualType::getFromOpaquePtr(type);
+ Expr *E = static_cast<Expr*>(expr.get());
+ Expr *OrigExpr = E;
+
+ InitBuiltinVaListType();
+
+ // Get the va_list type
+ QualType VaListType = Context.getBuiltinVaListType();
+ if (VaListType->isArrayType()) {
+ // Deal with implicit array decay; for example, on x86-64,
+ // va_list is an array, but it's supposed to decay to
+ // a pointer for va_arg.
+ VaListType = Context.getArrayDecayedType(VaListType);
+ // Make sure the input expression also decays appropriately.
+ UsualUnaryConversions(E);
+ } else {
+ // Otherwise, the va_list argument must be an l-value because
+ // it is modified by va_arg.
+ if (!E->isTypeDependent() &&
+ CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ }
+
+ if (!E->isTypeDependent() &&
+ !Context.hasSameType(VaListType, E->getType())) {
+ return ExprError(Diag(E->getLocStart(),
+ diag::err_first_argument_to_va_arg_not_of_type_va_list)
+ << OrigExpr->getType() << E->getSourceRange());
+ }
+
+ // FIXME: Check that type is complete/non-abstract
+ // FIXME: Warn if a non-POD type is passed in.
+
+ expr.release();
+ return Owned(new (Context) VAArgExpr(BuiltinLoc, E, T.getNonReferenceType(),
+ RPLoc));
+}
+
+Sema::OwningExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
+ // The type of __null will be int or long, depending on the size of
+ // pointers on the target.
+ QualType Ty;
+ if (Context.Target.getPointerWidth(0) == Context.Target.getIntWidth())
+ Ty = Context.IntTy;
+ else
+ Ty = Context.LongTy;
+
+ return Owned(new (Context) GNUNullExpr(Ty, TokenLoc));
+}
+
+bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
+ SourceLocation Loc,
+ QualType DstType, QualType SrcType,
+ Expr *SrcExpr, const char *Flavor) {
+  // Decode the result (notice that ASTs are still created for extensions).
+ bool isInvalid = false;
+ unsigned DiagKind;
+ switch (ConvTy) {
+ default: assert(0 && "Unknown conversion type");
+ case Compatible: return false;
+ case PointerToInt:
+ DiagKind = diag::ext_typecheck_convert_pointer_int;
+ break;
+ case IntToPointer:
+ DiagKind = diag::ext_typecheck_convert_int_pointer;
+ break;
+ case IncompatiblePointer:
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer;
+ break;
+ case IncompatiblePointerSign:
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ break;
+ case FunctionVoidPointer:
+ DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ break;
+ case CompatiblePointerDiscardsQualifiers:
+ // If the qualifiers lost were because we were applying the
+ // (deprecated) C++ conversion from a string literal to a char*
+ // (or wchar_t*), then there was no error (C++ 4.2p2). FIXME:
+ // Ideally, this check would be performed in
+ // CheckPointerTypesForAssignment. However, that would require a
+ // bit of refactoring (so that the second argument is an
+ // expression, rather than a type), which should be done as part
+ // of a larger effort to fix CheckPointerTypesForAssignment for
+ // C++ semantics.
+ if (getLangOptions().CPlusPlus &&
+ IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
+ return false;
+ DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ break;
+ case IntToBlockPointer:
+ DiagKind = diag::err_int_to_block_pointer;
+ break;
+ case IncompatibleBlockPointer:
+ DiagKind = diag::err_typecheck_convert_incompatible_block_pointer;
+ break;
+ case IncompatibleObjCQualifiedId:
+ // FIXME: Diagnose the problem in ObjCQualifiedIdTypesAreCompatible, since
+ // it can give a more specific diagnostic.
+ DiagKind = diag::warn_incompatible_qualified_id;
+ break;
+ case IncompatibleVectors:
+ DiagKind = diag::warn_incompatible_vectors;
+ break;
+ case Incompatible:
+ DiagKind = diag::err_typecheck_convert_incompatible;
+ isInvalid = true;
+ break;
+ }
+
+ Diag(Loc, DiagKind) << DstType << SrcType << Flavor
+ << SrcExpr->getSourceRange();
+ return isInvalid;
+}
+
+bool Sema::VerifyIntegerConstantExpression(const Expr *E, llvm::APSInt *Result){
+ llvm::APSInt ICEResult;
+ if (E->isIntegerConstantExpr(ICEResult, Context)) {
+ if (Result)
+ *Result = ICEResult;
+ return false;
+ }
+
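+  // Not a strict ICE; try folding it. If it folds to an integer without side
+  // effects, accept it with an extension diagnostic, otherwise emit an error.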
+ Expr::EvalResult EvalResult;
+
+ if (!E->Evaluate(EvalResult, Context) || !EvalResult.Val.isInt() ||
+ EvalResult.HasSideEffects) {
+ Diag(E->getExprLoc(), diag::err_expr_not_ice) << E->getSourceRange();
+
+ if (EvalResult.Diag) {
+ // We only show the note if it's not the usual "invalid subexpression"
+ // or if it's actually in a subexpression.
+ if (EvalResult.Diag != diag::note_invalid_subexpr_in_ice ||
+ E->IgnoreParens() != EvalResult.DiagExpr->IgnoreParens())
+ Diag(EvalResult.DiagLoc, EvalResult.Diag);
+ }
+
+ return true;
+ }
+
+ Diag(E->getExprLoc(), diag::ext_expr_not_ice) <<
+ E->getSourceRange();
+
+ if (EvalResult.Diag &&
+ Diags.getDiagnosticLevel(diag::ext_expr_not_ice) != Diagnostic::Ignored)
+ Diag(EvalResult.DiagLoc, EvalResult.Diag);
+
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return false;
+}
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
new file mode 100644
index 0000000..65018da
--- /dev/null
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -0,0 +1,1603 @@
+//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SemaInherit.h"
+#include "Sema.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+/// ActOnCXXConversionFunctionExpr - Parse a C++ conversion function
+/// name (e.g., operator void const *) as an expression. This is
+/// very similar to ActOnIdentifierExpr, except that instead of
+/// providing an identifier the parser provides the type of the
+/// conversion function.
+Sema::OwningExprResult
+Sema::ActOnCXXConversionFunctionExpr(Scope *S, SourceLocation OperatorLoc,
+ TypeTy *Ty, bool HasTrailingLParen,
+ const CXXScopeSpec &SS,
+ bool isAddressOfOperand) {
+ QualType ConvType = QualType::getFromOpaquePtr(Ty);
+ QualType ConvTypeCanon = Context.getCanonicalType(ConvType);
+ DeclarationName ConvName
+ = Context.DeclarationNames.getCXXConversionFunctionName(ConvTypeCanon);
+ return ActOnDeclarationNameExpr(S, OperatorLoc, ConvName, HasTrailingLParen,
+ &SS, isAddressOfOperand);
+}
+
+/// ActOnCXXOperatorFunctionIdExpr - Parse a C++ overloaded operator
+/// name (e.g., @c operator+ ) as an expression. This is very
+/// similar to ActOnIdentifierExpr, except that instead of providing
+/// an identifier the parser provides the kind of overloaded
+/// operator that was parsed.
+Sema::OwningExprResult
+Sema::ActOnCXXOperatorFunctionIdExpr(Scope *S, SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ bool HasTrailingLParen,
+ const CXXScopeSpec &SS,
+ bool isAddressOfOperand) {
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(Op);
+ return ActOnDeclarationNameExpr(S, OperatorLoc, Name, HasTrailingLParen, &SS,
+ isAddressOfOperand);
+}
+
+/// ActOnCXXTypeid - Parse typeid( type-id ) or typeid( expression ).
+Action::OwningExprResult
+Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
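+  // typeid requires that <typeinfo> has been included so that std::type_info
+  // is visible; diagnose if it is not.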
+ NamespaceDecl *StdNs = GetStdNamespace();
+ if (!StdNs)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+
+ IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
+ Decl *TypeInfoDecl = LookupQualifiedName(StdNs, TypeInfoII, LookupTagName);
+ RecordDecl *TypeInfoRecordDecl = dyn_cast_or_null<RecordDecl>(TypeInfoDecl);
+ if (!TypeInfoRecordDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+
+ QualType TypeInfoType = Context.getTypeDeclType(TypeInfoRecordDecl);
+
+ return Owned(new (Context) CXXTypeidExpr(isType, TyOrExpr,
+ TypeInfoType.withConst(),
+ SourceRange(OpLoc, RParenLoc)));
+}
+
+/// ActOnCXXBoolLiteral - Parse {true,false} literals.
+Action::OwningExprResult
+Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
+ "Unknown C++ Boolean value!");
+ return Owned(new (Context) CXXBoolLiteralExpr(Kind == tok::kw_true,
+ Context.BoolTy, OpLoc));
+}
+
+/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
+Action::OwningExprResult
+Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
+ return Owned(new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc));
+}
+
+/// ActOnCXXThrow - Parse throw expressions.
+Action::OwningExprResult
+Sema::ActOnCXXThrow(SourceLocation OpLoc, ExprArg E) {
+ Expr *Ex = E.takeAs<Expr>();
+ if (Ex && !Ex->isTypeDependent() && CheckCXXThrowOperand(OpLoc, Ex))
+ return ExprError();
+ return Owned(new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc));
+}
+
+/// CheckCXXThrowOperand - Validate the operand of a throw.
+bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) {
+ // C++ [except.throw]p3:
+ // [...] adjusting the type from "array of T" or "function returning T"
+ // to "pointer to T" or "pointer to function returning T", [...]
+ DefaultFunctionArrayConversion(E);
+
+ // If the type of the exception would be an incomplete type or a pointer
+ // to an incomplete type other than (cv) void the program is ill-formed.
+ QualType Ty = E->getType();
+ int isPointer = 0;
+ if (const PointerType* Ptr = Ty->getAsPointerType()) {
+ Ty = Ptr->getPointeeType();
+ isPointer = 1;
+ }
+ if (!isPointer || !Ty->isVoidType()) {
+ if (RequireCompleteType(ThrowLoc, Ty,
+ isPointer ? diag::err_throw_incomplete_ptr
+ : diag::err_throw_incomplete,
+ E->getSourceRange(), SourceRange(), QualType()))
+ return true;
+ }
+
+ // FIXME: Construct a temporary here.
+ return false;
+}
+
+Action::OwningExprResult Sema::ActOnCXXThis(SourceLocation ThisLoc) {
+  // C++ 9.3.2: In the body of a non-static member function, the keyword this
+  // is a non-lvalue expression whose value is the address of the object for
+  // which the function is called.
+
+ if (!isa<FunctionDecl>(CurContext))
+ return ExprError(Diag(ThisLoc, diag::err_invalid_this_use));
+
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext))
+ if (MD->isInstance())
+ return Owned(new (Context) CXXThisExpr(ThisLoc,
+ MD->getThisType(Context)));
+
+ return ExprError(Diag(ThisLoc, diag::err_invalid_this_use));
+}
+
+/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+Action::OwningExprResult
+Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg exprs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ assert(TypeRep && "Missing type!");
+ QualType Ty = QualType::getFromOpaquePtr(TypeRep);
+ unsigned NumExprs = exprs.size();
+ Expr **Exprs = (Expr**)exprs.get();
+ SourceLocation TyBeginLoc = TypeRange.getBegin();
+ SourceRange FullRange = SourceRange(TyBeginLoc, RParenLoc);
+
+ if (Ty->isDependentType() ||
+ CallExpr::hasAnyTypeDependentArguments(Exprs, NumExprs)) {
+ exprs.release();
+
+ return Owned(CXXUnresolvedConstructExpr::Create(Context,
+ TypeRange.getBegin(), Ty,
+ LParenLoc,
+ Exprs, NumExprs,
+ RParenLoc));
+ }
+
+
+ // C++ [expr.type.conv]p1:
+ // If the expression list is a single expression, the type conversion
+ // expression is equivalent (in definedness, and if defined in meaning) to the
+ // corresponding cast expression.
+ //
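+  // For example, 'double(i)' behaves exactly like the cast '(double)i'.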
+ if (NumExprs == 1) {
+ if (CheckCastTypes(TypeRange, Ty, Exprs[0]))
+ return ExprError();
+ exprs.release();
+ return Owned(new (Context) CXXFunctionalCastExpr(Ty.getNonReferenceType(),
+ Ty, TyBeginLoc, Exprs[0],
+ RParenLoc));
+ }
+
+ if (const RecordType *RT = Ty->getAsRecordType()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+
+ // FIXME: We should always create a CXXTemporaryObjectExpr here unless
+ // both the ctor and dtor are trivial.
+ if (NumExprs > 1 || Record->hasUserDeclaredConstructor()) {
+ CXXConstructorDecl *Constructor
+ = PerformInitializationByConstructor(Ty, Exprs, NumExprs,
+ TypeRange.getBegin(),
+ SourceRange(TypeRange.getBegin(),
+ RParenLoc),
+ DeclarationName(),
+ IK_Direct);
+
+ if (!Constructor)
+ return ExprError();
+
+ exprs.release();
+ Expr *E = new (Context) CXXTemporaryObjectExpr(Context, Constructor,
+ Ty, TyBeginLoc, Exprs,
+ NumExprs, RParenLoc);
+ return MaybeBindToTemporary(E);
+ }
+
+ // Fall through to value-initialize an object of class type that
+ // doesn't have a user-declared default constructor.
+ }
+
+ // C++ [expr.type.conv]p1:
+ // If the expression list specifies more than a single value, the type shall
+ // be a class with a suitably declared constructor.
+ //
+ if (NumExprs > 1)
+ return ExprError(Diag(CommaLocs[0],
+ diag::err_builtin_func_cast_more_than_one_arg)
+ << FullRange);
+
+ assert(NumExprs == 0 && "Expected 0 expressions");
+
+ // C++ [expr.type.conv]p2:
+ // The expression T(), where T is a simple-type-specifier for a non-array
+ // complete object type or the (possibly cv-qualified) void type, creates an
+ // rvalue of the specified type, which is value-initialized.
+ //
+ if (Ty->isArrayType())
+ return ExprError(Diag(TyBeginLoc,
+ diag::err_value_init_for_array_type) << FullRange);
+ if (!Ty->isDependentType() && !Ty->isVoidType() &&
+ RequireCompleteType(TyBeginLoc, Ty,
+ diag::err_invalid_incomplete_type_use, FullRange))
+ return ExprError();
+
+ if (RequireNonAbstractType(TyBeginLoc, Ty,
+ diag::err_allocation_of_abstract_type))
+ return ExprError();
+
+ exprs.release();
+ return Owned(new (Context) CXXZeroInitValueExpr(Ty, TyBeginLoc, RParenLoc));
+}
+
+
+/// ActOnCXXNew - Parsed a C++ 'new' expression (C++ 5.3.4), as in e.g.:
+/// @code new (memory) int[size][4] @endcode
+/// or
+/// @code ::new Foo(23, "hello") @endcode
+/// For the interpretation of this heap of arguments, consult the declaration
+/// of this method in the Action base class.
+Action::OwningExprResult
+Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen, bool ParenTypeId,
+ Declarator &D, SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen)
+{
+ Expr *ArraySize = 0;
+ unsigned Skip = 0;
+ // If the specified type is an array, unwrap it and save the expression.
+ if (D.getNumTypeObjects() > 0 &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
+ DeclaratorChunk &Chunk = D.getTypeObject(0);
+ if (Chunk.Arr.hasStatic)
+ return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
+ << D.getSourceRange());
+ if (!Chunk.Arr.NumElts)
+ return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
+ << D.getSourceRange());
+ ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
+ Skip = 1;
+ }
+
+ QualType AllocType = GetTypeForDeclarator(D, /*Scope=*/0, Skip);
+ if (D.isInvalidType())
+ return ExprError();
+
+ // Every dimension shall be of constant size.
+ unsigned i = 1;
+ QualType ElementType = AllocType;
+ while (const ArrayType *Array = Context.getAsArrayType(ElementType)) {
+ if (!Array->isConstantArrayType()) {
+ Diag(D.getTypeObject(i).Loc, diag::err_new_array_nonconst)
+ << static_cast<Expr*>(D.getTypeObject(i).Arr.NumElts)->getSourceRange();
+ return ExprError();
+ }
+ ElementType = Array->getElementType();
+ ++i;
+ }
+
+ return BuildCXXNew(StartLoc, UseGlobal,
+ PlacementLParen,
+ move(PlacementArgs),
+ PlacementRParen,
+ ParenTypeId,
+ AllocType,
+ D.getSourceRange().getBegin(),
+ D.getSourceRange(),
+ Owned(ArraySize),
+ ConstructorLParen,
+ move(ConstructorArgs),
+ ConstructorRParen);
+}
+
+Sema::OwningExprResult
+Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ bool ParenTypeId,
+ QualType AllocType,
+ SourceLocation TypeLoc,
+ SourceRange TypeRange,
+ ExprArg ArraySizeE,
+ SourceLocation ConstructorLParen,
+ MultiExprArg ConstructorArgs,
+ SourceLocation ConstructorRParen) {
+ if (CheckAllocatedType(AllocType, TypeLoc, TypeRange))
+ return ExprError();
+
+ QualType ResultType = Context.getPointerType(AllocType);
+
+ // That every array dimension except the first is constant was already
+ // checked by the type check above.
+
+ // C++ 5.3.4p6: "The expression in a direct-new-declarator shall have integral
+ // or enumeration type with a non-negative value."
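+  // For example, 'new int[2.5]' is rejected because the size expression has
+  // floating-point type.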
+ Expr *ArraySize = (Expr *)ArraySizeE.get();
+ if (ArraySize && !ArraySize->isTypeDependent()) {
+ QualType SizeType = ArraySize->getType();
+ if (!SizeType->isIntegralType() && !SizeType->isEnumeralType())
+ return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
+ diag::err_array_size_not_integral)
+ << SizeType << ArraySize->getSourceRange());
+ // Let's see if this is a constant < 0. If so, we reject it out of hand.
+ // We don't care about special rules, so we tell the machinery it's not
+ // evaluated - it gives us a result in more cases.
+ if (!ArraySize->isValueDependent()) {
+ llvm::APSInt Value;
+ if (ArraySize->isIntegerConstantExpr(Value, Context, 0, false)) {
+ if (Value < llvm::APSInt(
+ llvm::APInt::getNullValue(Value.getBitWidth()), false))
+ return ExprError(Diag(ArraySize->getSourceRange().getBegin(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange());
+ }
+ }
+ }
+
+ FunctionDecl *OperatorNew = 0;
+ FunctionDecl *OperatorDelete = 0;
+ Expr **PlaceArgs = (Expr**)PlacementArgs.get();
+ unsigned NumPlaceArgs = PlacementArgs.size();
+ if (!AllocType->isDependentType() &&
+ !Expr::hasAnyTypeDependentArguments(PlaceArgs, NumPlaceArgs) &&
+ FindAllocationFunctions(StartLoc,
+ SourceRange(PlacementLParen, PlacementRParen),
+ UseGlobal, AllocType, ArraySize, PlaceArgs,
+ NumPlaceArgs, OperatorNew, OperatorDelete))
+ return ExprError();
+
+ bool Init = ConstructorLParen.isValid();
+ // --- Choosing a constructor ---
+ // C++ 5.3.4p15
+ // 1) If T is a POD and there's no initializer (ConstructorLParen is invalid)
+ // the object is not initialized. If the object, or any part of it, is
+ // const-qualified, it's an error.
+ // 2) If T is a POD and there's an empty initializer, the object is value-
+ // initialized.
+ // 3) If T is a POD and there's one initializer argument, the object is copy-
+ // constructed.
+ // 4) If T is a POD and there's more initializer arguments, it's an error.
+ // 5) If T is not a POD, the initializer arguments are used as constructor
+ // arguments.
+ //
+ // Or by the C++0x formulation:
+ // 1) If there's no initializer, the object is default-initialized according
+ // to C++0x rules.
+ // 2) Otherwise, the object is direct-initialized.
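+  // Illustration of the POD cases above (not exhaustive):
+  //   new int;        // uninitialized (case 1)
+  //   new const int;  // error: uninitialized const object (case 1)
+  //   new int();      // value-initialized to 0 (case 2)
+  //   new int(5);     // copy-initialized from 5 (case 3)
+  //   new int(1, 2);  // error: more than one initializer (case 4)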
+ CXXConstructorDecl *Constructor = 0;
+ Expr **ConsArgs = (Expr**)ConstructorArgs.get();
+ const RecordType *RT;
+ unsigned NumConsArgs = ConstructorArgs.size();
+ if (AllocType->isDependentType()) {
+ // Skip all the checks.
+ }
+ else if ((RT = AllocType->getAsRecordType()) &&
+ !AllocType->isAggregateType()) {
+ Constructor = PerformInitializationByConstructor(
+ AllocType, ConsArgs, NumConsArgs,
+ TypeLoc,
+ SourceRange(TypeLoc, ConstructorRParen),
+ RT->getDecl()->getDeclName(),
+ NumConsArgs != 0 ? IK_Direct : IK_Default);
+ if (!Constructor)
+ return ExprError();
+ } else {
+ if (!Init) {
+ // FIXME: Check that no subpart is const.
+ if (AllocType.isConstQualified())
+ return ExprError(Diag(StartLoc, diag::err_new_uninitialized_const)
+ << TypeRange);
+ } else if (NumConsArgs == 0) {
+ // Object is value-initialized. Do nothing.
+ } else if (NumConsArgs == 1) {
+ // Object is direct-initialized.
+ // FIXME: What DeclarationName do we pass in here?
+ if (CheckInitializerTypes(ConsArgs[0], AllocType, StartLoc,
+ DeclarationName() /*AllocType.getAsString()*/,
+ /*DirectInit=*/true))
+ return ExprError();
+ } else {
+ return ExprError(Diag(StartLoc,
+ diag::err_builtin_direct_init_more_than_one_arg)
+ << SourceRange(ConstructorLParen, ConstructorRParen));
+ }
+ }
+
+ // FIXME: Also check that the destructor is accessible. (C++ 5.3.4p16)
+
+ PlacementArgs.release();
+ ConstructorArgs.release();
+ ArraySizeE.release();
+ return Owned(new (Context) CXXNewExpr(UseGlobal, OperatorNew, PlaceArgs,
+ NumPlaceArgs, ParenTypeId, ArraySize, Constructor, Init,
+ ConsArgs, NumConsArgs, OperatorDelete, ResultType,
+ StartLoc, Init ? ConstructorRParen : SourceLocation()));
+}
+
+/// CheckAllocatedType - Checks that a type is suitable as the allocated type
+/// in a new-expression.
+bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
+ SourceRange R)
+{
+ // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
+  // abstract class type or array thereof."
+ if (AllocType->isFunctionType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 0 << R;
+ else if (AllocType->isReferenceType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 1 << R;
+ else if (!AllocType->isDependentType() &&
+ RequireCompleteType(Loc, AllocType,
+ diag::err_new_incomplete_type,
+ R))
+ return true;
+ else if (RequireNonAbstractType(Loc, AllocType,
+ diag::err_allocation_of_abstract_type))
+ return true;
+
+ return false;
+}
+
+/// FindAllocationFunctions - Finds the overloads of operator new and delete
+/// that are appropriate for the allocation.
+bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
+ bool UseGlobal, QualType AllocType,
+ bool IsArray, Expr **PlaceArgs,
+ unsigned NumPlaceArgs,
+ FunctionDecl *&OperatorNew,
+ FunctionDecl *&OperatorDelete)
+{
+ // --- Choosing an allocation function ---
+ // C++ 5.3.4p8 - 14 & 18
+ // 1) If UseGlobal is true, only look in the global scope. Else, also look
+ // in the scope of the allocated class.
+ // 2) If an array size is given, look for operator new[], else look for
+ // operator new.
+ // 3) The first argument is always size_t. Append the arguments from the
+ // placement form.
+ // FIXME: Also find the appropriate delete operator.
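+  // For example, for a placement form such as 'new (arena) Widget' (hypothetical
+  // names), overload resolution for 'operator new' is run over the argument
+  // list (size_t, arena); the leading size argument is the dummy built below.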
+
+ llvm::SmallVector<Expr*, 8> AllocArgs(1 + NumPlaceArgs);
+ // We don't care about the actual value of this argument.
+ // FIXME: Should the Sema create the expression and embed it in the syntax
+ // tree? Or should the consumer just recalculate the value?
+ AllocArgs[0] = new (Context) IntegerLiteral(llvm::APInt::getNullValue(
+ Context.Target.getPointerWidth(0)),
+ Context.getSizeType(),
+ SourceLocation());
+ std::copy(PlaceArgs, PlaceArgs + NumPlaceArgs, AllocArgs.begin() + 1);
+
+ DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
+ IsArray ? OO_Array_New : OO_New);
+ if (AllocType->isRecordType() && !UseGlobal) {
+ CXXRecordDecl *Record
+ = cast<CXXRecordDecl>(AllocType->getAsRecordType()->getDecl());
+ // FIXME: We fail to find inherited overloads.
+ if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
+ AllocArgs.size(), Record, /*AllowMissing=*/true,
+ OperatorNew))
+ return true;
+ }
+ if (!OperatorNew) {
+ // Didn't find a member overload. Look for a global one.
+ DeclareGlobalNewDelete();
+ DeclContext *TUDecl = Context.getTranslationUnitDecl();
+ if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
+ AllocArgs.size(), TUDecl, /*AllowMissing=*/false,
+ OperatorNew))
+ return true;
+ }
+
+ // FindAllocationOverload can change the passed in arguments, so we need to
+ // copy them back.
+ if (NumPlaceArgs > 0)
+ std::copy(&AllocArgs[1], AllocArgs.end(), PlaceArgs);
+
+ // FIXME: This is leaked on error. But so much is currently in Sema that it's
+ // easier to clean it in one go.
+ AllocArgs[0]->Destroy(Context);
+ return false;
+}
+
+/// FindAllocationOverload - Find a fitting overload for the allocation
+/// function in the specified scope.
+bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
+ DeclarationName Name, Expr** Args,
+ unsigned NumArgs, DeclContext *Ctx,
+ bool AllowMissing, FunctionDecl *&Operator)
+{
+ DeclContext::lookup_iterator Alloc, AllocEnd;
+ llvm::tie(Alloc, AllocEnd) = Ctx->lookup(Context, Name);
+ if (Alloc == AllocEnd) {
+ if (AllowMissing)
+ return false;
+ return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ }
+
+ OverloadCandidateSet Candidates;
+ for (; Alloc != AllocEnd; ++Alloc) {
+ // Even member operator new/delete are implicitly treated as
+ // static, so don't use AddMemberCandidate.
+ if (FunctionDecl *Fn = dyn_cast<FunctionDecl>(*Alloc))
+ AddOverloadCandidate(Fn, Args, NumArgs, Candidates,
+ /*SuppressUserConversions=*/false);
+ }
+
+ // Do the resolution.
+ OverloadCandidateSet::iterator Best;
+ switch(BestViableFunction(Candidates, Best)) {
+ case OR_Success: {
+ // Got one!
+ FunctionDecl *FnDecl = Best->Function;
+ // The first argument is size_t, and the first parameter must be size_t,
+ // too. This is checked on declaration and can be assumed. (It can't be
+ // asserted on, though, since invalid decls are left in there.)
+ for (unsigned i = 1; i < NumArgs; ++i) {
+ // FIXME: Passing word to diagnostic.
+ if (PerformCopyInitialization(Args[i],
+ FnDecl->getParamDecl(i)->getType(),
+ "passing"))
+ return true;
+ }
+ Operator = FnDecl;
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+ Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ PrintOverloadCandidates(Candidates, /*OnlyViable=*/false);
+ return true;
+
+ case OR_Ambiguous:
+ Diag(StartLoc, diag::err_ovl_ambiguous_call)
+ << Name << Range;
+ PrintOverloadCandidates(Candidates, /*OnlyViable=*/true);
+ return true;
+
+ case OR_Deleted:
+ Diag(StartLoc, diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << Name << Range;
+ PrintOverloadCandidates(Candidates, /*OnlyViable=*/true);
+ return true;
+ }
+ assert(false && "Unreachable, bad result from BestViableFunction");
+ return true;
+}
+
+
+/// DeclareGlobalNewDelete - Declare the global forms of operator new and
+/// delete. These are:
+/// @code
+/// void* operator new(std::size_t) throw(std::bad_alloc);
+/// void* operator new[](std::size_t) throw(std::bad_alloc);
+/// void operator delete(void *) throw();
+/// void operator delete[](void *) throw();
+/// @endcode
+/// Note that the placement and nothrow forms of new are *not* implicitly
+/// declared. Their use requires including \<new\>.
+void Sema::DeclareGlobalNewDelete()
+{
+ if (GlobalNewDeleteDeclared)
+ return;
+ GlobalNewDeleteDeclared = true;
+
+ QualType VoidPtr = Context.getPointerType(Context.VoidTy);
+ QualType SizeT = Context.getSizeType();
+
+ // FIXME: Exception specifications are not added.
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_New),
+ VoidPtr, SizeT);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_New),
+ VoidPtr, SizeT);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete),
+ Context.VoidTy, VoidPtr);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
+ Context.VoidTy, VoidPtr);
+}
+
+/// DeclareGlobalAllocationFunction - Declares a single implicit global
+/// allocation function if it doesn't already exist.
+void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
+ QualType Return, QualType Argument)
+{
+ DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
+
+ // Check if this function is already declared.
+ {
+ DeclContext::lookup_iterator Alloc, AllocEnd;
+ for (llvm::tie(Alloc, AllocEnd) = GlobalCtx->lookup(Context, Name);
+ Alloc != AllocEnd; ++Alloc) {
+ // FIXME: Do we need to check for default arguments here?
+ FunctionDecl *Func = cast<FunctionDecl>(*Alloc);
+ if (Func->getNumParams() == 1 &&
+ Context.getCanonicalType(Func->getParamDecl(0)->getType())==Argument)
+ return;
+ }
+ }
+
+ QualType FnType = Context.getFunctionType(Return, &Argument, 1, false, 0);
+ FunctionDecl *Alloc =
+ FunctionDecl::Create(Context, GlobalCtx, SourceLocation(), Name,
+ FnType, FunctionDecl::None, false, true,
+ SourceLocation());
+ Alloc->setImplicit();
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
+ 0, Argument, VarDecl::None, 0);
+ Alloc->setParams(Context, &Param, 1);
+
+ // FIXME: Also add this declaration to the IdentifierResolver, but
+ // make sure it is at the end of the chain to coincide with the
+ // global scope.
+ ((DeclContext *)TUScope->getEntity())->addDecl(Context, Alloc);
+}
+
+/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
+/// @code ::delete ptr; @endcode
+/// or
+/// @code delete [] ptr; @endcode
+Action::OwningExprResult
+Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
+ bool ArrayForm, ExprArg Operand)
+{
+ // C++ 5.3.5p1: "The operand shall have a pointer type, or a class type
+ // having a single conversion function to a pointer type. The result has
+ // type void."
+ // DR599 amends "pointer type" to "pointer to object type" in both cases.
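+  // For example, 'delete p' is rejected below when p has type 'void *', and
+  // deleting a pointer to an incomplete class type only produces a warning.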
+
+ Expr *Ex = (Expr *)Operand.get();
+ if (!Ex->isTypeDependent()) {
+ QualType Type = Ex->getType();
+
+ if (Type->isRecordType()) {
+ // FIXME: Find that one conversion function and amend the type.
+ }
+
+ if (!Type->isPointerType())
+ return ExprError(Diag(StartLoc, diag::err_delete_operand)
+ << Type << Ex->getSourceRange());
+
+ QualType Pointee = Type->getAsPointerType()->getPointeeType();
+ if (Pointee->isFunctionType() || Pointee->isVoidType())
+ return ExprError(Diag(StartLoc, diag::err_delete_operand)
+ << Type << Ex->getSourceRange());
+ else if (!Pointee->isDependentType() &&
+ RequireCompleteType(StartLoc, Pointee,
+ diag::warn_delete_incomplete,
+ Ex->getSourceRange()))
+ return ExprError();
+
+ // FIXME: Look up the correct operator delete overload and pass a pointer
+ // along.
+ // FIXME: Check access and ambiguity of operator delete and destructor.
+ }
+
+ Operand.release();
+ return Owned(new (Context) CXXDeleteExpr(Context.VoidTy, UseGlobal, ArrayForm,
+ 0, Ex, StartLoc));
+}
+
+
+/// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
+/// C++ if/switch/while/for statement.
+/// e.g: "if (int x = f()) {...}"
+Action::OwningExprResult
+Sema::ActOnCXXConditionDeclarationExpr(Scope *S, SourceLocation StartLoc,
+ Declarator &D,
+ SourceLocation EqualLoc,
+ ExprArg AssignExprVal) {
+ assert(AssignExprVal.get() && "Null assignment expression");
+
+ // C++ 6.4p2:
+ // The declarator shall not specify a function or an array.
+ // The type-specifier-seq shall not contain typedef and shall not declare a
+ // new class or enumeration.
+
+ assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class of condition decl.");
+
+ QualType Ty = GetTypeForDeclarator(D, S);
+
+ if (Ty->isFunctionType()) { // The declarator shall not specify a function...
+ // We exit without creating a CXXConditionDeclExpr because a FunctionDecl
+ // would be created and CXXConditionDeclExpr wants a VarDecl.
+ return ExprError(Diag(StartLoc, diag::err_invalid_use_of_function_type)
+ << SourceRange(StartLoc, EqualLoc));
+ } else if (Ty->isArrayType()) { // ...or an array.
+ Diag(StartLoc, diag::err_invalid_use_of_array_type)
+ << SourceRange(StartLoc, EqualLoc);
+ } else if (const RecordType *RT = Ty->getAsRecordType()) {
+ RecordDecl *RD = RT->getDecl();
+ // The type-specifier-seq shall not declare a new class...
+ if (RD->isDefinition() &&
+ (RD->getIdentifier() == 0 || S->isDeclScope(DeclPtrTy::make(RD))))
+ Diag(RD->getLocation(), diag::err_type_defined_in_condition);
+ } else if (const EnumType *ET = Ty->getAsEnumType()) {
+ EnumDecl *ED = ET->getDecl();
+ // ...or enumeration.
+ if (ED->isDefinition() &&
+ (ED->getIdentifier() == 0 || S->isDeclScope(DeclPtrTy::make(ED))))
+ Diag(ED->getLocation(), diag::err_type_defined_in_condition);
+ }
+
+ DeclPtrTy Dcl = ActOnDeclarator(S, D, DeclPtrTy());
+ if (!Dcl)
+ return ExprError();
+ AddInitializerToDecl(Dcl, move(AssignExprVal), /*DirectInit=*/false);
+
+ // Mark this variable as one that is declared within a conditional.
+ // We know that the decl had to be a VarDecl because that is the only type of
+ // decl that can be assigned and the grammar requires an '='.
+ VarDecl *VD = cast<VarDecl>(Dcl.getAs<Decl>());
+ VD->setDeclaredInCondition(true);
+ return Owned(new (Context) CXXConditionDeclExpr(StartLoc, EqualLoc, VD));
+}
+
+/// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid.
+bool Sema::CheckCXXBooleanCondition(Expr *&CondExpr) {
+ // C++ 6.4p4:
+ // The value of a condition that is an initialized declaration in a statement
+ // other than a switch statement is the value of the declared variable
+ // implicitly converted to type bool. If that conversion is ill-formed, the
+ // program is ill-formed.
+ // The value of a condition that is an expression is the value of the
+ // expression, implicitly converted to bool.
+ //
+ return PerformContextuallyConvertToBool(CondExpr);
+}
+
+/// Helper function to determine whether this is the (deprecated) C++
+/// conversion from a string literal to a pointer to non-const char or
+/// non-const wchar_t (for narrow and wide string literals,
+/// respectively).
+bool
+Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
+ // Look inside the implicit cast, if it exists.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From))
+ From = Cast->getSubExpr();
+
+ // A string literal (2.13.4) that is not a wide string literal can
+ // be converted to an rvalue of type "pointer to char"; a wide
+ // string literal can be converted to an rvalue of type "pointer
+ // to wchar_t" (C++ 4.2p2).
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From))
+ if (const PointerType *ToPtrType = ToType->getAsPointerType())
+ if (const BuiltinType *ToPointeeType
+ = ToPtrType->getPointeeType()->getAsBuiltinType()) {
+ // This conversion is considered only when there is an
+ // explicit appropriate pointer target type (C++ 4.2p2).
+ if (ToPtrType->getPointeeType().getCVRQualifiers() == 0 &&
+ ((StrLit->isWide() && ToPointeeType->isWideCharType()) ||
+ (!StrLit->isWide() &&
+ (ToPointeeType->getKind() == BuiltinType::Char_U ||
+ ToPointeeType->getKind() == BuiltinType::Char_S))))
+ return true;
+ }
+
+ return false;
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType. Returns true if there was an
+/// error, false otherwise. The expression From is replaced with the
+/// converted expression. Flavor is the kind of conversion we're
+/// performing, used in the error message. If @p AllowExplicit,
+/// explicit user-defined conversions are permitted. @p Elidable should be true
+/// when called for copies which may be elided (C++ 12.8p15). C++0x overload
+/// resolution works differently in that case.
+bool
+Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
+ const char *Flavor, bool AllowExplicit,
+ bool Elidable)
+{
+ ImplicitConversionSequence ICS;
+ ICS.ConversionKind = ImplicitConversionSequence::BadConversion;
+ if (Elidable && getLangOptions().CPlusPlus0x) {
+ ICS = TryImplicitConversion(From, ToType, /*SuppressUserConversions*/false,
+ AllowExplicit, /*ForceRValue*/true);
+ }
+ if (ICS.ConversionKind == ImplicitConversionSequence::BadConversion) {
+ ICS = TryImplicitConversion(From, ToType, false, AllowExplicit);
+ }
+ return PerformImplicitConversion(From, ToType, ICS, Flavor);
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType using the pre-computed implicit
+/// conversion sequence ICS. Returns true if there was an error, false
+/// otherwise. The expression From is replaced with the converted
+/// expression. Flavor is the kind of conversion we're performing,
+/// used in the error message.
+bool
+Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
+ const ImplicitConversionSequence &ICS,
+ const char* Flavor) {
+ switch (ICS.ConversionKind) {
+ case ImplicitConversionSequence::StandardConversion:
+ if (PerformImplicitConversion(From, ToType, ICS.Standard, Flavor))
+ return true;
+ break;
+
+ case ImplicitConversionSequence::UserDefinedConversion:
+ // FIXME: This is, of course, wrong. We'll need to actually call the
+ // constructor or conversion operator, and then cope with the standard
+ // conversions.
+ ImpCastExprToType(From, ToType.getNonReferenceType(),
+ ToType->isLValueReferenceType());
+ return false;
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ assert(false && "Cannot perform an ellipsis conversion");
+ return false;
+
+ case ImplicitConversionSequence::BadConversion:
+ return true;
+ }
+
+ // Everything went well.
+ return false;
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType by following the standard
+/// conversion sequence SCS. Returns true if there was an error, false
+/// otherwise. The expression From is replaced with the converted
+/// expression. Flavor is the context in which we're performing this
+/// conversion, for use in error messages.
+bool
+Sema::PerformImplicitConversion(Expr *&From, QualType ToType,
+ const StandardConversionSequence& SCS,
+ const char *Flavor) {
+ // Overall FIXME: we are recomputing too many types here and doing far too
+ // much extra work. What this means is that we need to keep track of more
+ // information that is computed when we try the implicit conversion initially,
+ // so that we don't need to recompute anything here.
+ QualType FromType = From->getType();
+
+ if (SCS.CopyConstructor) {
+ // FIXME: When can ToType be a reference type?
+ assert(!ToType->isReferenceType());
+
+ // FIXME: Keep track of whether the copy constructor is elidable or not.
+ From = CXXConstructExpr::Create(Context, ToType,
+ SCS.CopyConstructor, false, &From, 1);
+ return false;
+ }
+
+ // Perform the first implicit conversion.
+ switch (SCS.First) {
+ case ICK_Identity:
+ case ICK_Lvalue_To_Rvalue:
+ // Nothing to do.
+ break;
+
+ case ICK_Array_To_Pointer:
+ FromType = Context.getArrayDecayedType(FromType);
+ ImpCastExprToType(From, FromType);
+ break;
+
+ case ICK_Function_To_Pointer:
+ if (Context.getCanonicalType(FromType) == Context.OverloadTy) {
+ FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType, true);
+ if (!Fn)
+ return true;
+
+ if (DiagnoseUseOfDecl(Fn, From->getSourceRange().getBegin()))
+ return true;
+
+ FixOverloadedFunctionReference(From, Fn);
+ FromType = From->getType();
+ }
+ FromType = Context.getPointerType(FromType);
+ ImpCastExprToType(From, FromType);
+ break;
+
+ default:
+ assert(false && "Improper first standard conversion");
+ break;
+ }
+
+ // Perform the second implicit conversion
+ switch (SCS.Second) {
+ case ICK_Identity:
+ // Nothing to do.
+ break;
+
+ case ICK_Integral_Promotion:
+ case ICK_Floating_Promotion:
+ case ICK_Complex_Promotion:
+ case ICK_Integral_Conversion:
+ case ICK_Floating_Conversion:
+ case ICK_Complex_Conversion:
+ case ICK_Floating_Integral:
+ case ICK_Complex_Real:
+ case ICK_Compatible_Conversion:
+ // FIXME: Go deeper to get the unqualified type!
+ FromType = ToType.getUnqualifiedType();
+ ImpCastExprToType(From, FromType);
+ break;
+
+ case ICK_Pointer_Conversion:
+ if (SCS.IncompatibleObjC) {
+ // Diagnose incompatible Objective-C conversions
+ Diag(From->getSourceRange().getBegin(),
+ diag::ext_typecheck_convert_incompatible_pointer)
+ << From->getType() << ToType << Flavor
+ << From->getSourceRange();
+ }
+
+ if (CheckPointerConversion(From, ToType))
+ return true;
+ ImpCastExprToType(From, ToType);
+ break;
+
+ case ICK_Pointer_Member:
+ if (CheckMemberPointerConversion(From, ToType))
+ return true;
+ ImpCastExprToType(From, ToType);
+ break;
+
+ case ICK_Boolean_Conversion:
+ FromType = Context.BoolTy;
+ ImpCastExprToType(From, FromType);
+ break;
+
+ default:
+ assert(false && "Improper second standard conversion");
+ break;
+ }
+
+ switch (SCS.Third) {
+ case ICK_Identity:
+ // Nothing to do.
+ break;
+
+ case ICK_Qualification:
+ // FIXME: Not sure about lvalue vs rvalue here in the presence of rvalue
+ // references.
+ ImpCastExprToType(From, ToType.getNonReferenceType(),
+ ToType->isLValueReferenceType());
+ break;
+
+ default:
+ assert(false && "Improper second standard conversion");
+ break;
+ }
+
+ return false;
+}
+
+Sema::OwningExprResult Sema::ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ SourceLocation LParen,
+ TypeTy *Ty,
+ SourceLocation RParen) {
+ // FIXME: Some of the type traits have requirements. Interestingly, only the
+ // __is_base_of requirement is explicitly stated to be diagnosed. Indeed, G++
+ // accepts __is_pod(Incomplete) without complaints, and claims that the type
+ // is indeed a POD.
+
+ // There is no point in eagerly computing the value. The traits are designed
+ // to be used from type trait templates, so Ty will be a template parameter
+ // 99% of the time.
+ return Owned(new (Context) UnaryTypeTraitExpr(KWLoc, OTT,
+ QualType::getFromOpaquePtr(Ty),
+ RParen, Context.BoolTy));
+}
+
+QualType Sema::CheckPointerToMemberOperands(
+ Expr *&lex, Expr *&rex, SourceLocation Loc, bool isIndirect)
+{
+ const char *OpSpelling = isIndirect ? "->*" : ".*";
+ // C++ 5.5p2
+ // The binary operator .* [p3: ->*] binds its second operand, which shall
+ // be of type "pointer to member of T" (where T is a completely-defined
+ // class type) [...]
+ QualType RType = rex->getType();
+ const MemberPointerType *MemPtr = RType->getAsMemberPointerType();
+ if (!MemPtr) {
+ Diag(Loc, diag::err_bad_memptr_rhs)
+ << OpSpelling << RType << rex->getSourceRange();
+ return QualType();
+ }
+
+ QualType Class(MemPtr->getClass(), 0);
+
+ // C++ 5.5p2
+ // [...] to its first operand, which shall be of class T or of a class of
+ // which T is an unambiguous and accessible base class. [p3: a pointer to
+ // such a class]
+ QualType LType = lex->getType();
+ if (isIndirect) {
+ if (const PointerType *Ptr = LType->getAsPointerType())
+ LType = Ptr->getPointeeType().getNonReferenceType();
+ else {
+ Diag(Loc, diag::err_bad_memptr_lhs)
+ << OpSpelling << 1 << LType << lex->getSourceRange();
+ return QualType();
+ }
+ }
+
+ if (Context.getCanonicalType(Class).getUnqualifiedType() !=
+ Context.getCanonicalType(LType).getUnqualifiedType()) {
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ // FIXME: Would it be useful to print full ambiguity paths, or is that
+ // overkill?
+ if (!IsDerivedFrom(LType, Class, Paths) ||
+ Paths.isAmbiguous(Context.getCanonicalType(Class))) {
+ Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
+ << (int)isIndirect << lex->getType() << lex->getSourceRange();
+ return QualType();
+ }
+ }
+
+ // C++ 5.5p2
+ // The result is an object or a function of the type specified by the
+ // second operand.
+ // The cv qualifiers are the union of those in the pointer and the left side,
+ // in accordance with 5.5p5 and 5.2.5.
+ // FIXME: This returns a dereferenced member function pointer as a normal
+ // function type. However, the only operation valid on such functions is
+ // calling them. There's also a GCC extension to get a function pointer to the
+ // thing, which is another complication, because this type - unlike the type
+ // that is the result of this expression - takes the class as the first
+ // argument.
+ // We probably need a "MemberFunctionClosureType" or something like that.
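+  // For example, given 'int S::*pm' and 'const S cs', the expression 'cs.*pm'
+  // yields an lvalue of type 'const int'.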
+ QualType Result = MemPtr->getPointeeType();
+ if (LType.isConstQualified())
+ Result.addConst();
+ if (LType.isVolatileQualified())
+ Result.addVolatile();
+ return Result;
+}
+
+/// \brief Get the target type of a standard or user-defined conversion.
+static QualType TargetType(const ImplicitConversionSequence &ICS) {
+ assert((ICS.ConversionKind ==
+ ImplicitConversionSequence::StandardConversion ||
+ ICS.ConversionKind ==
+ ImplicitConversionSequence::UserDefinedConversion) &&
+ "function only valid for standard or user-defined conversions");
+ if (ICS.ConversionKind == ImplicitConversionSequence::StandardConversion)
+ return QualType::getFromOpaquePtr(ICS.Standard.ToTypePtr);
+ return QualType::getFromOpaquePtr(ICS.UserDefined.After.ToTypePtr);
+}
+
+/// \brief Try to convert a type to another according to C++0x 5.16p3.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand has class type, an attempt is made to convert each operand
+/// to the other. This function does the conversion in one direction.
+/// It emits a diagnostic and returns true only if it finds an ambiguous
+/// conversion.
+static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
+ SourceLocation QuestionLoc,
+ ImplicitConversionSequence &ICS)
+{
+ // C++0x 5.16p3
+ // The process for determining whether an operand expression E1 of type T1
+ // can be converted to match an operand expression E2 of type T2 is defined
+ // as follows:
+ // -- If E2 is an lvalue:
+ if (To->isLvalue(Self.Context) == Expr::LV_Valid) {
+ // E1 can be converted to match E2 if E1 can be implicitly converted to
+ // type "lvalue reference to T2", subject to the constraint that in the
+ // conversion the reference must bind directly to E1.
+ if (!Self.CheckReferenceInit(From,
+ Self.Context.getLValueReferenceType(To->getType()),
+ &ICS))
+ {
+ assert((ICS.ConversionKind ==
+ ImplicitConversionSequence::StandardConversion ||
+ ICS.ConversionKind ==
+ ImplicitConversionSequence::UserDefinedConversion) &&
+ "expected a definite conversion");
+ bool DirectBinding =
+ ICS.ConversionKind == ImplicitConversionSequence::StandardConversion ?
+ ICS.Standard.DirectBinding : ICS.UserDefined.After.DirectBinding;
+ if (DirectBinding)
+ return false;
+ }
+ }
+ ICS.ConversionKind = ImplicitConversionSequence::BadConversion;
+ // -- If E2 is an rvalue, or if the conversion above cannot be done:
+ // -- if E1 and E2 have class type, and the underlying class types are
+ // the same or one is a base class of the other:
+ QualType FTy = From->getType();
+ QualType TTy = To->getType();
+ const RecordType *FRec = FTy->getAsRecordType();
+ const RecordType *TRec = TTy->getAsRecordType();
+ bool FDerivedFromT = FRec && TRec && Self.IsDerivedFrom(FTy, TTy);
+ if (FRec && TRec && (FRec == TRec ||
+ FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) {
+ // E1 can be converted to match E2 if the class of T2 is the
+ // same type as, or a base class of, the class of T1, and
+      // the cv-qualification of T2 is the same as, or greater than, that of T1.
+ if ((FRec == TRec || FDerivedFromT) && TTy.isAtLeastAsQualifiedAs(FTy)) {
+ // Could still fail if there's no copy constructor.
+ // FIXME: Is this a hard error then, or just a conversion failure? The
+ // standard doesn't say.
+ ICS = Self.TryCopyInitialization(From, TTy);
+ }
+ } else {
+ // -- Otherwise: E1 can be converted to match E2 if E1 can be
+ // implicitly converted to the type that expression E2 would have
+ // if E2 were converted to an rvalue.
+ // First find the decayed type.
+ if (TTy->isFunctionType())
+ TTy = Self.Context.getPointerType(TTy);
+ else if(TTy->isArrayType())
+ TTy = Self.Context.getArrayDecayedType(TTy);
+
+ // Now try the implicit conversion.
+ // FIXME: This doesn't detect ambiguities.
+ ICS = Self.TryImplicitConversion(From, TTy);
+ }
+ return false;
+}
+
+/// \brief Try to find a common type for two according to C++0x 5.16p5.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand is a class type, overload resolution is used to find a
+/// conversion to a common type.
+static bool FindConditionalOverload(Sema &Self, Expr *&LHS, Expr *&RHS,
+ SourceLocation Loc) {
+ Expr *Args[2] = { LHS, RHS };
+ OverloadCandidateSet CandidateSet;
+ Self.AddBuiltinOperatorCandidates(OO_Conditional, Args, 2, CandidateSet);
+
+ OverloadCandidateSet::iterator Best;
+ switch (Self.BestViableFunction(CandidateSet, Best)) {
+ case Sema::OR_Success:
+ // We found a match. Perform the conversions on the arguments and move on.
+ if (Self.PerformImplicitConversion(LHS, Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], "converting") ||
+ Self.PerformImplicitConversion(RHS, Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], "converting"))
+ break;
+ return false;
+
+ case Sema::OR_No_Viable_Function:
+ Self.Diag(Loc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return true;
+
+ case Sema::OR_Ambiguous:
+ Self.Diag(Loc, diag::err_conditional_ambiguous_ovl)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ // FIXME: Print the possible common types by printing the return types of
+ // the viable candidates.
+ break;
+
+ case Sema::OR_Deleted:
+ assert(false && "Conditional operator has only built-in overloads");
+ break;
+ }
+ return true;
+}
+
+/// \brief Perform an "extended" implicit conversion as returned by
+/// TryClassUnification.
+///
+/// TryClassUnification generates ICSs that include reference bindings.
+/// PerformImplicitConversion is not suitable for this; it chokes if the
+/// second part of a standard conversion is ICK_DerivedToBase. This function
+/// handles the reference binding specially.
+static bool ConvertForConditional(Sema &Self, Expr *&E,
+ const ImplicitConversionSequence &ICS)
+{
+ if (ICS.ConversionKind == ImplicitConversionSequence::StandardConversion &&
+ ICS.Standard.ReferenceBinding) {
+ assert(ICS.Standard.DirectBinding &&
+ "TryClassUnification should never generate indirect ref bindings");
+ // FIXME: CheckReferenceInit should be able to reuse the ICS instead of
+ // redoing all the work.
+ return Self.CheckReferenceInit(E, Self.Context.getLValueReferenceType(
+ TargetType(ICS)));
+ }
+ if (ICS.ConversionKind == ImplicitConversionSequence::UserDefinedConversion &&
+ ICS.UserDefined.After.ReferenceBinding) {
+ assert(ICS.UserDefined.After.DirectBinding &&
+ "TryClassUnification should never generate indirect ref bindings");
+ return Self.CheckReferenceInit(E, Self.Context.getLValueReferenceType(
+ TargetType(ICS)));
+ }
+ if (Self.PerformImplicitConversion(E, TargetType(ICS), ICS, "converting"))
+ return true;
+ return false;
+}
+
+/// \brief Check the operands of ?: under C++ semantics.
+///
+/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
+/// extension. In this case, LHS == Cond. (But they're not aliases.)
+QualType Sema::CXXCheckConditionalOperands(Expr *&Cond, Expr *&LHS, Expr *&RHS,
+ SourceLocation QuestionLoc) {
+ // FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++
+ // interface pointers.
+
+ // C++0x 5.16p1
+ // The first expression is contextually converted to bool.
+ if (!Cond->isTypeDependent()) {
+ if (CheckCXXBooleanCondition(Cond))
+ return QualType();
+ }
+
+ // Either of the arguments dependent?
+ if (LHS->isTypeDependent() || RHS->isTypeDependent())
+ return Context.DependentTy;
+
+ // C++0x 5.16p2
+ // If either the second or the third operand has type (cv) void, ...
+ QualType LTy = LHS->getType();
+ QualType RTy = RHS->getType();
+ bool LVoid = LTy->isVoidType();
+ bool RVoid = RTy->isVoidType();
+ if (LVoid || RVoid) {
+ // ... then the [l2r] conversions are performed on the second and third
+ // operands ...
+ DefaultFunctionArrayConversion(LHS);
+ DefaultFunctionArrayConversion(RHS);
+ LTy = LHS->getType();
+ RTy = RHS->getType();
+
+ // ... and one of the following shall hold:
+ // -- The second or the third operand (but not both) is a throw-
+ // expression; the result is of the type of the other and is an rvalue.
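+    // For example, 'ok ? throw 0 : 5' has type 'int' and is an rvalue.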
+ bool LThrow = isa<CXXThrowExpr>(LHS);
+ bool RThrow = isa<CXXThrowExpr>(RHS);
+ if (LThrow && !RThrow)
+ return RTy;
+ if (RThrow && !LThrow)
+ return LTy;
+
+ // -- Both the second and third operands have type void; the result is of
+ // type void and is an rvalue.
+ if (LVoid && RVoid)
+ return Context.VoidTy;
+
+ // Neither holds, error.
+ Diag(QuestionLoc, diag::err_conditional_void_nonvoid)
+ << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return QualType();
+ }
+
+ // Neither is void.
+
+ // C++0x 5.16p3
+  // Otherwise, if the second and third operands have different types, and
+  // either has (cv) class type, an attempt is made to convert each of those
+  // operands to the other.
+ if (Context.getCanonicalType(LTy) != Context.getCanonicalType(RTy) &&
+ (LTy->isRecordType() || RTy->isRecordType())) {
+ ImplicitConversionSequence ICSLeftToRight, ICSRightToLeft;
+ // These return true if a single direction is already ambiguous.
+ if (TryClassUnification(*this, LHS, RHS, QuestionLoc, ICSLeftToRight))
+ return QualType();
+ if (TryClassUnification(*this, RHS, LHS, QuestionLoc, ICSRightToLeft))
+ return QualType();
+
+ bool HaveL2R = ICSLeftToRight.ConversionKind !=
+ ImplicitConversionSequence::BadConversion;
+ bool HaveR2L = ICSRightToLeft.ConversionKind !=
+ ImplicitConversionSequence::BadConversion;
+ // If both can be converted, [...] the program is ill-formed.
+ if (HaveL2R && HaveR2L) {
+ Diag(QuestionLoc, diag::err_conditional_ambiguous)
+ << LTy << RTy << LHS->getSourceRange() << RHS->getSourceRange();
+ return QualType();
+ }
+
+ // If exactly one conversion is possible, that conversion is applied to
+ // the chosen operand and the converted operands are used in place of the
+ // original operands for the remainder of this section.
+ if (HaveL2R) {
+ if (ConvertForConditional(*this, LHS, ICSLeftToRight))
+ return QualType();
+ LTy = LHS->getType();
+ } else if (HaveR2L) {
+ if (ConvertForConditional(*this, RHS, ICSRightToLeft))
+ return QualType();
+ RTy = RHS->getType();
+ }
+ }
+
+ // C++0x 5.16p4
+ // If the second and third operands are lvalues and have the same type,
+ // the result is of that type [...]
+ bool Same = Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy);
+ if (Same && LHS->isLvalue(Context) == Expr::LV_Valid &&
+ RHS->isLvalue(Context) == Expr::LV_Valid)
+ return LTy;
+
+ // C++0x 5.16p5
+ // Otherwise, the result is an rvalue. If the second and third operands
+ // do not have the same type, and either has (cv) class type, ...
+ if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
+ // ... overload resolution is used to determine the conversions (if any)
+ // to be applied to the operands. If the overload resolution fails, the
+ // program is ill-formed.
+ if (FindConditionalOverload(*this, LHS, RHS, QuestionLoc))
+ return QualType();
+ }
+
+ // C++0x 5.16p6
+ // LValue-to-rvalue, array-to-pointer, and function-to-pointer standard
+ // conversions are performed on the second and third operands.
+ DefaultFunctionArrayConversion(LHS);
+ DefaultFunctionArrayConversion(RHS);
+ LTy = LHS->getType();
+ RTy = RHS->getType();
+
+ // After those conversions, one of the following shall hold:
+ // -- The second and third operands have the same type; the result
+ // is of that type.
+ if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy))
+ return LTy;
+
+ // -- The second and third operands have arithmetic or enumeration type;
+ // the usual arithmetic conversions are performed to bring them to a
+ // common type, and the result is of that type.
+ if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
+ UsualArithmeticConversions(LHS, RHS);
+ return LHS->getType();
+ }
+
+ // -- The second and third operands have pointer type, or one has pointer
+ // type and the other is a null pointer constant; pointer conversions
+ // and qualification conversions are performed to bring them to their
+ // composite pointer type. The result is of the composite pointer type.
+ QualType Composite = FindCompositePointerType(LHS, RHS);
+ if (!Composite.isNull())
+ return Composite;
+
+  // The fourth bullet is the same for pointers-to-member. However, the possible
+ // conversions are far more limited: we have null-to-pointer, upcast of
+ // containing class, and second-level cv-ness.
+ // cv-ness is not a union, but must match one of the two operands. (Which,
+ // frankly, is stupid.)
+ const MemberPointerType *LMemPtr = LTy->getAsMemberPointerType();
+ const MemberPointerType *RMemPtr = RTy->getAsMemberPointerType();
+ if (LMemPtr && RHS->isNullPointerConstant(Context)) {
+ ImpCastExprToType(RHS, LTy);
+ return LTy;
+ }
+ if (RMemPtr && LHS->isNullPointerConstant(Context)) {
+ ImpCastExprToType(LHS, RTy);
+ return RTy;
+ }
+ if (LMemPtr && RMemPtr) {
+ QualType LPointee = LMemPtr->getPointeeType();
+ QualType RPointee = RMemPtr->getPointeeType();
+ // First, we check that the unqualified pointee type is the same. If it's
+ // not, there's no conversion that will unify the two pointers.
+ if (Context.getCanonicalType(LPointee).getUnqualifiedType() ==
+ Context.getCanonicalType(RPointee).getUnqualifiedType()) {
+ // Second, we take the greater of the two cv qualifications. If neither
+ // is greater than the other, the conversion is not possible.
+ unsigned Q = LPointee.getCVRQualifiers() | RPointee.getCVRQualifiers();
+ if (Q == LPointee.getCVRQualifiers() || Q == RPointee.getCVRQualifiers()){
+ // Third, we check if either of the container classes is derived from
+ // the other.
+ QualType LContainer(LMemPtr->getClass(), 0);
+ QualType RContainer(RMemPtr->getClass(), 0);
+ QualType MoreDerived;
+ if (Context.getCanonicalType(LContainer) ==
+ Context.getCanonicalType(RContainer))
+ MoreDerived = LContainer;
+ else if (IsDerivedFrom(LContainer, RContainer))
+ MoreDerived = LContainer;
+ else if (IsDerivedFrom(RContainer, LContainer))
+ MoreDerived = RContainer;
+
+ if (!MoreDerived.isNull()) {
+ // The type 'Q Pointee (MoreDerived::*)' is the common type.
+ // We don't use ImpCastExprToType here because this could still fail
+ // for ambiguous or inaccessible conversions.
+ QualType Common = Context.getMemberPointerType(
+ LPointee.getQualifiedType(Q), MoreDerived.getTypePtr());
+ if (PerformImplicitConversion(LHS, Common, "converting"))
+ return QualType();
+ if (PerformImplicitConversion(RHS, Common, "converting"))
+ return QualType();
+ return Common;
+ }
+ }
+ }
+ }
+
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return QualType();
+}
+
+/// \brief Find a merged pointer type and convert the two expressions to it.
+///
+/// This finds the composite pointer type for @p E1 and @p E2 according to
+/// C++0x 5.9p2. It converts both expressions to this type and returns it.
+/// It does not emit diagnostics.
+QualType Sema::FindCompositePointerType(Expr *&E1, Expr *&E2) {
+ assert(getLangOptions().CPlusPlus && "This function assumes C++");
+ QualType T1 = E1->getType(), T2 = E2->getType();
+ if(!T1->isPointerType() && !T2->isPointerType())
+ return QualType();
+
+ // C++0x 5.9p2
+ // Pointer conversions and qualification conversions are performed on
+ // pointer operands to bring them to their composite pointer type. If
+ // one operand is a null pointer constant, the composite pointer type is
+ // the type of the other operand.
+ if (E1->isNullPointerConstant(Context)) {
+ ImpCastExprToType(E1, T2);
+ return T2;
+ }
+ if (E2->isNullPointerConstant(Context)) {
+ ImpCastExprToType(E2, T1);
+ return T1;
+ }
+ // Now both have to be pointers.
+ if(!T1->isPointerType() || !T2->isPointerType())
+ return QualType();
+
+  // Otherwise, if one of the operands has type "pointer to cv1 void," then
+ // the other has type "pointer to cv2 T" and the composite pointer type is
+ // "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
+ // Otherwise, the composite pointer type is a pointer type similar to the
+ // type of one of the operands, with a cv-qualification signature that is
+ // the union of the cv-qualification signatures of the operand types.
+ // In practice, the first part here is redundant; it's subsumed by the second.
+ // What we do here is, we build the two possible composite types, and try the
+ // conversions in both directions. If only one works, or if the two composite
+ // types are the same, we have succeeded.
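+  // For example, for operands of type 'int *' and 'const int *' the composite
+  // pointer type is 'const int *', and both operands can be converted to it.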
+ llvm::SmallVector<unsigned, 4> QualifierUnion;
+ QualType Composite1 = T1, Composite2 = T2;
+ const PointerType *Ptr1, *Ptr2;
+ while ((Ptr1 = Composite1->getAsPointerType()) &&
+ (Ptr2 = Composite2->getAsPointerType())) {
+ Composite1 = Ptr1->getPointeeType();
+ Composite2 = Ptr2->getPointeeType();
+ QualifierUnion.push_back(
+ Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
+ }
+ // Rewrap the composites as pointers with the union CVRs.
+ for (llvm::SmallVector<unsigned, 4>::iterator I = QualifierUnion.begin(),
+ E = QualifierUnion.end(); I != E; ++I) {
+ Composite1 = Context.getPointerType(Composite1.getQualifiedType(*I));
+ Composite2 = Context.getPointerType(Composite2.getQualifiedType(*I));
+ }
+
+ ImplicitConversionSequence E1ToC1 = TryImplicitConversion(E1, Composite1);
+ ImplicitConversionSequence E2ToC1 = TryImplicitConversion(E2, Composite1);
+ ImplicitConversionSequence E1ToC2, E2ToC2;
+ E1ToC2.ConversionKind = ImplicitConversionSequence::BadConversion;
+ E2ToC2.ConversionKind = ImplicitConversionSequence::BadConversion;
+ if (Context.getCanonicalType(Composite1) !=
+ Context.getCanonicalType(Composite2)) {
+ E1ToC2 = TryImplicitConversion(E1, Composite2);
+ E2ToC2 = TryImplicitConversion(E2, Composite2);
+ }
+
+ bool ToC1Viable = E1ToC1.ConversionKind !=
+ ImplicitConversionSequence::BadConversion
+ && E2ToC1.ConversionKind !=
+ ImplicitConversionSequence::BadConversion;
+ bool ToC2Viable = E1ToC2.ConversionKind !=
+ ImplicitConversionSequence::BadConversion
+ && E2ToC2.ConversionKind !=
+ ImplicitConversionSequence::BadConversion;
+ if (ToC1Viable && !ToC2Viable) {
+ if (!PerformImplicitConversion(E1, Composite1, E1ToC1, "converting") &&
+ !PerformImplicitConversion(E2, Composite1, E2ToC1, "converting"))
+ return Composite1;
+ }
+ if (ToC2Viable && !ToC1Viable) {
+ if (!PerformImplicitConversion(E1, Composite2, E1ToC2, "converting") &&
+ !PerformImplicitConversion(E2, Composite2, E2ToC2, "converting"))
+ return Composite2;
+ }
+ return QualType();
+}
+
+Sema::OwningExprResult Sema::MaybeBindToTemporary(Expr *E) {
+ const RecordType *RT = E->getType()->getAsRecordType();
+ if (!RT)
+ return Owned(E);
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDestructor())
+ return Owned(E);
+
+ CXXTemporary *Temp = CXXTemporary::Create(Context,
+ RD->getDestructor(Context));
+ ExprTemporaries.push_back(Temp);
+
+ // FIXME: Add the temporary to the temporaries vector.
+ return Owned(CXXBindTemporaryExpr::Create(Context, Temp, E));
+}
+
+// FIXME: This doesn't handle casts yet.
+Expr *Sema::RemoveOutermostTemporaryBinding(Expr *E) {
+ const RecordType *RT = E->getType()->getAsRecordType();
+ if (!RT)
+ return E;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDestructor())
+ return E;
+
+ /// The expr passed in must be a CXXExprWithTemporaries.
+ CXXExprWithTemporaries *TempExpr = dyn_cast<CXXExprWithTemporaries>(E);
+ if (!TempExpr)
+ return E;
+
+ Expr *SubExpr = TempExpr->getSubExpr();
+ if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubExpr)) {
+ assert(BE->getTemporary() ==
+ TempExpr->getTemporary(TempExpr->getNumTemporaries() - 1) &&
+ "Found temporary is not last in list!");
+
+ Expr *BindSubExpr = BE->getSubExpr();
+ BE->setSubExpr(0);
+
+ if (TempExpr->getNumTemporaries() == 1) {
+ // There's just one temporary left, so we don't need the TempExpr node.
+ TempExpr->Destroy(Context);
+ return BindSubExpr;
+ } else {
+ TempExpr->removeLastTemporary();
+ TempExpr->setSubExpr(BindSubExpr);
+ BE->Destroy(Context);
+ }
+
+ return E;
+ }
+
+ // FIXME: We might need to handle other expressions here.
+ return E;
+}
+
+Sema::OwningExprResult Sema::ActOnFinishFullExpr(ExprArg Arg) {
+ Expr *FullExpr = Arg.takeAs<Expr>();
+
+ if (FullExpr && !ExprTemporaries.empty()) {
+ // Create a cleanup expr.
+ FullExpr = CXXExprWithTemporaries::Create(Context, FullExpr,
+ &ExprTemporaries[0],
+ ExprTemporaries.size());
+ ExprTemporaries.clear();
+ }
+
+ return Owned(FullExpr);
+}
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
new file mode 100644
index 0000000..eabc87d
--- /dev/null
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -0,0 +1,860 @@
+//===--- SemaExprObjC.cpp - Semantic Analysis for ObjC Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
+#include "llvm/ADT/SmallString.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+
+Sema::ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
+ ExprTy **strings,
+ unsigned NumStrings) {
+ StringLiteral **Strings = reinterpret_cast<StringLiteral**>(strings);
+
+ // Most ObjC strings are formed out of a single piece. However, we *can*
+ // have strings formed out of multiple @ strings with multiple pptokens in
+ // each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
+ // StringLiteral for ObjCStringLiteral to hold onto.
+ StringLiteral *S = Strings[0];
+
+ // If we have a multi-part string, merge it all together.
+ if (NumStrings != 1) {
+ // Concatenate objc strings.
+ llvm::SmallString<128> StrBuf;
+ llvm::SmallVector<SourceLocation, 8> StrLocs;
+
+ for (unsigned i = 0; i != NumStrings; ++i) {
+ S = Strings[i];
+
+ // ObjC strings can't be wide.
+ if (S->isWide()) {
+ Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << S->getSourceRange();
+ return true;
+ }
+
+ // Get the string data.
+ StrBuf.append(S->getStrData(), S->getStrData()+S->getByteLength());
+
+ // Get the locations of the string tokens.
+ StrLocs.append(S->tokloc_begin(), S->tokloc_end());
+
+ // Free the temporary string.
+ S->Destroy(Context);
+ }
+
+ // Create the aggregate string with the appropriate content and location
+ // information.
+ S = StringLiteral::Create(Context, &StrBuf[0], StrBuf.size(), false,
+ Context.getPointerType(Context.CharTy),
+ &StrLocs[0], StrLocs.size());
+ }
+
+ // Verify that this composite string is acceptable for ObjC strings.
+ if (CheckObjCString(S))
+ return true;
+
+ // Initialize the constant string interface lazily. This assumes
+ // the NSString interface is seen in this translation unit. Note: We
+ // don't use NSConstantString, since the runtime team considers this
+ // interface private (even though it appears in the header files).
+ QualType Ty = Context.getObjCConstantStringInterface();
+ if (!Ty.isNull()) {
+ Ty = Context.getPointerType(Ty);
+ } else {
+ IdentifierInfo *NSIdent = &Context.Idents.get("NSString");
+ NamedDecl *IF = LookupName(TUScope, NSIdent, LookupOrdinaryName);
+ if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
+ Context.setObjCConstantStringInterface(StrIF);
+ Ty = Context.getObjCConstantStringInterface();
+ Ty = Context.getPointerType(Ty);
+ } else {
+ // If there is no NSString interface defined then treat constant
+ // strings as untyped objects and let the runtime figure it out later.
+ Ty = Context.getObjCIdType();
+ }
+ }
+
+ return new (Context) ObjCStringLiteral(S, Ty, AtLocs[0]);
+}
+
+Sema::ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ TypeTy *ty,
+ SourceLocation RParenLoc) {
+ QualType EncodedType = QualType::getFromOpaquePtr(ty);
+
+ std::string Str;
+ Context.getObjCEncodingForType(EncodedType, Str);
+
+ // The type of @encode is the same as the type of the corresponding string,
+ // which is an array type.
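+ // For example (illustrative only): @encode(int) typically yields the
+ // encoding string "i", so the expression has type 'char[2]'
+ // ('const char[2]' in C++, per the const added below).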
+ QualType StrTy = Context.CharTy;
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOptions().CPlusPlus)
+ StrTy.addConst();
+ StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1),
+ ArrayType::Normal, 0);
+
+ return new (Context) ObjCEncodeExpr(StrTy, EncodedType, AtLoc, RParenLoc);
+}
+
+Sema::ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ QualType Ty = Context.getObjCSelType();
+ return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc);
+}
+
+Sema::ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId);
+ if (!PDecl) {
+ Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
+ return true;
+ }
+
+ QualType Ty = Context.getObjCProtoType();
+ if (Ty.isNull())
+ return true;
+ Ty = Context.getPointerType(Ty);
+ return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, RParenLoc);
+}
+
+bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs,
+ Selector Sel, ObjCMethodDecl *Method,
+ bool isClassMessage,
+ SourceLocation lbrac, SourceLocation rbrac,
+ QualType &ReturnType) {
+ if (!Method) {
+ // Apply the default argument promotions (C99 6.5.2.2p6).
+ for (unsigned i = 0; i != NumArgs; i++)
+ DefaultArgumentPromotion(Args[i]);
+
+ unsigned DiagID = isClassMessage ? diag::warn_class_method_not_found :
+ diag::warn_inst_method_not_found;
+ Diag(lbrac, DiagID)
+ << Sel << isClassMessage << SourceRange(lbrac, rbrac);
+ ReturnType = Context.getObjCIdType();
+ return false;
+ }
+
+ ReturnType = Method->getResultType();
+
+ unsigned NumNamedArgs = Sel.getNumArgs();
+ assert(NumArgs >= NumNamedArgs && "Too few arguments for selector!");
+
+ bool IsError = false;
+ for (unsigned i = 0; i < NumNamedArgs; i++) {
+ Expr *argExpr = Args[i];
+ assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
+
+ QualType lhsType = Method->param_begin()[i]->getType();
+ QualType rhsType = argExpr->getType();
+
+ // If necessary, apply function/array conversion. C99 6.7.5.3p[7,8].
+ if (lhsType->isArrayType())
+ lhsType = Context.getArrayDecayedType(lhsType);
+ else if (lhsType->isFunctionType())
+ lhsType = Context.getPointerType(lhsType);
+
+ AssignConvertType Result =
+ CheckSingleAssignmentConstraints(lhsType, argExpr);
+ if (Args[i] != argExpr) // The expression was converted.
+ Args[i] = argExpr; // Make sure we store the converted expression.
+
+ IsError |=
+ DiagnoseAssignmentResult(Result, argExpr->getLocStart(), lhsType, rhsType,
+ argExpr, "sending");
+ }
+
+ // Promote additional arguments to variadic methods.
+ if (Method->isVariadic()) {
+ for (unsigned i = NumNamedArgs; i < NumArgs; ++i)
+ IsError |= DefaultVariadicArgumentPromotion(Args[i], VariadicMethod);
+ } else {
+ // Check for extra arguments to non-variadic methods.
+ if (NumArgs != NumNamedArgs) {
+ Diag(Args[NumNamedArgs]->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 2 /*method*/ << Method->getSourceRange()
+ << SourceRange(Args[NumNamedArgs]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
+ }
+ }
+
+ return IsError;
+}
+
+bool Sema::isSelfExpr(Expr *RExpr) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RExpr))
+ if (DRE->getDecl()->getIdentifier() == &Context.Idents.get("self"))
+ return true;
+ return false;
+}
+
+// Helper method for ActOnClassMessage/ActOnInstanceMessage.
+// Searches "local" class/category implementations for a method decl.
+// If that fails, we search the class's root for an instance method.
+// Returns 0 if no method is found.
+ObjCMethodDecl *Sema::LookupPrivateClassMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl) {
+ ObjCMethodDecl *Method = 0;
+ // lookup in class and all superclasses
+ while (ClassDecl && !Method) {
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(ClassDecl->getIdentifier()))
+ Method = ImpDecl->getClassMethod(Context, Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Method) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Method; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == ClassDecl)
+ Method = ObjCCategoryImpls[i]->getClassMethod(Context, Sel);
+ }
+ }
+
+ // Before we give up, check if the selector is an instance method.
+ // But only in the root. This matches gcc's behaviour and what the
+ // runtime expects.
+ if (!Method && !ClassDecl->getSuperClass()) {
+ Method = ClassDecl->lookupInstanceMethod(Context, Sel);
+ // Look through local category implementations associated
+ // with the root class.
+ if (!Method)
+ Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return Method;
+}
+
+ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl) {
+ ObjCMethodDecl *Method = 0;
+ while (ClassDecl && !Method) {
+ // If we have implementations in scope, check "private" methods.
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(ClassDecl->getIdentifier()))
+ Method = ImpDecl->getInstanceMethod(Context, Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Method) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Method; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == ClassDecl)
+ Method = ObjCCategoryImpls[i]->getInstanceMethod(Context, Sel);
+ }
+ }
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return Method;
+}
+
+Action::OwningExprResult Sema::ActOnClassPropertyRefExpr(
+ IdentifierInfo &receiverName,
+ IdentifierInfo &propertyName,
+ SourceLocation &receiverNameLoc,
+ SourceLocation &propertyNameLoc) {
+
+ ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(&receiverName);
+
+ // Search for a declared property first.
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&propertyName);
+ ObjCMethodDecl *Getter = IFace->lookupClassMethod(Context, Sel);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl())
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface())
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(ClassDecl->getIdentifier()))
+ Getter = ImpDecl->getClassMethod(Context, Sel);
+
+ if (Getter) {
+ // FIXME: refactor/share with ActOnMemberReference().
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, propertyNameLoc))
+ return ExprError();
+ }
+
+ // Look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), &propertyName);
+
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(Context, SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl())
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface())
+ if (ObjCImplementationDecl *ImpDecl
+ = LookupObjCImplementation(ClassDecl->getIdentifier()))
+ Setter = ImpDecl->getClassMethod(Context, SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter) {
+ for (unsigned i = 0; i < ObjCCategoryImpls.size() && !Setter; i++) {
+ if (ObjCCategoryImpls[i]->getClassInterface() == IFace)
+ Setter = ObjCCategoryImpls[i]->getClassMethod(Context, SetterSel);
+ }
+ }
+
+ if (Setter && DiagnoseUseOfDecl(Setter, propertyNameLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ QualType PType;
+
+ if (Getter)
+ PType = Getter->getResultType();
+ else {
+ for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(),
+ E = Setter->param_end(); PI != E; ++PI)
+ PType = (*PI)->getType();
+ }
+ return Owned(new (Context) ObjCKVCRefExpr(Getter, PType, Setter,
+ propertyNameLoc, IFace, receiverNameLoc));
+ }
+ return ExprError(Diag(propertyNameLoc, diag::err_property_not_found)
+ << &propertyName << Context.getObjCInterfaceType(IFace));
+}
+
+
+// ActOnClassMessage - used for both unary and keyword messages.
+// ArgExprs is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
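+//
+// Illustrative examples (not from the original comment):
+//   [NSString string]                     - unary class message
+//   [NSString stringWithUTF8String:"hi"]  - keyword class message, one arg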
+Sema::ExprResult Sema::ActOnClassMessage(
+ Scope *S,
+ IdentifierInfo *receiverName, Selector Sel,
+ SourceLocation lbrac, SourceLocation receiverLoc,
+ SourceLocation selectorLoc, SourceLocation rbrac,
+ ExprTy **Args, unsigned NumArgs)
+{
+ assert(receiverName && "missing receiver class name");
+
+ Expr **ArgExprs = reinterpret_cast<Expr **>(Args);
+ ObjCInterfaceDecl* ClassDecl = 0;
+ bool isSuper = false;
+
+ if (receiverName->isStr("super")) {
+ if (getCurMethodDecl()) {
+ isSuper = true;
+ ObjCInterfaceDecl *OID = getCurMethodDecl()->getClassInterface();
+ if (!OID)
+ return Diag(lbrac, diag::error_no_super_class_message)
+ << getCurMethodDecl()->getDeclName();
+ ClassDecl = OID->getSuperClass();
+ if (!ClassDecl)
+ return Diag(lbrac, diag::error_no_super_class) << OID->getDeclName();
+ if (getCurMethodDecl()->isInstanceMethod()) {
+ QualType superTy = Context.getObjCInterfaceType(ClassDecl);
+ superTy = Context.getPointerType(superTy);
+ ExprResult ReceiverExpr = new (Context) ObjCSuperExpr(SourceLocation(),
+ superTy);
+ // We are really in an instance method, redirect.
+ return ActOnInstanceMessage(ReceiverExpr.get(), Sel, lbrac,
+ selectorLoc, rbrac, Args, NumArgs);
+ }
+ // We are sending a message to 'super' within a class method. Do nothing,
+ // the receiver will pass through as 'super' (how convenient:-).
+ } else {
+ // 'super' has been used outside a method context. If a variable named
+ // 'super' has been declared, redirect. If not, produce a diagnostic.
+ NamedDecl *SuperDecl = LookupName(S, receiverName, LookupOrdinaryName);
+ ValueDecl *VD = dyn_cast_or_null<ValueDecl>(SuperDecl);
+ if (VD) {
+ ExprResult ReceiverExpr = new (Context) DeclRefExpr(VD, VD->getType(),
+ receiverLoc);
+ // We are really in an instance method, redirect.
+ return ActOnInstanceMessage(ReceiverExpr.get(), Sel, lbrac,
+ selectorLoc, rbrac, Args, NumArgs);
+ }
+ return Diag(receiverLoc, diag::err_undeclared_var_use) << receiverName;
+ }
+ } else
+ ClassDecl = getObjCInterfaceDecl(receiverName);
+
+ // The following code allows for the following GCC-ism:
+ //
+ // typedef XCElementDisplayRect XCElementGraphicsRect;
+ //
+ // @implementation XCRASlice
+ // - whatever { // Note that XCElementGraphicsRect is a typedef name.
+ // _sGraphicsDelegate =[[XCElementGraphicsRect alloc] init];
+ // }
+ //
+ // If necessary, the following lookup could move to getObjCInterfaceDecl().
+ if (!ClassDecl) {
+ NamedDecl *IDecl = LookupName(TUScope, receiverName, LookupOrdinaryName);
+ if (TypedefDecl *OCTD = dyn_cast_or_null<TypedefDecl>(IDecl)) {
+ const ObjCInterfaceType *OCIT;
+ OCIT = OCTD->getUnderlyingType()->getAsObjCInterfaceType();
+ if (!OCIT) {
+ Diag(receiverLoc, diag::err_invalid_receiver_to_message);
+ return true;
+ }
+ ClassDecl = OCIT->getDecl();
+ }
+ }
+ assert(ClassDecl && "missing interface declaration");
+ ObjCMethodDecl *Method = 0;
+ QualType returnType;
+ if (ClassDecl->isForwardDecl()) {
+ // A forward class used in messaging is treated as a 'Class'.
+ Diag(lbrac, diag::warn_receiver_forward_class) << ClassDecl->getDeclName();
+ Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(lbrac,rbrac));
+ if (Method)
+ Diag(Method->getLocation(), diag::note_method_sent_forward_class)
+ << Method->getDeclName();
+ }
+ if (!Method)
+ Method = ClassDecl->lookupClassMethod(Context, Sel);
+
+ // If we have an implementation in scope, check "private" methods.
+ if (!Method)
+ Method = LookupPrivateClassMethod(Sel, ClassDecl);
+
+ if (Method && DiagnoseUseOfDecl(Method, receiverLoc))
+ return true;
+
+ if (CheckMessageArgumentTypes(ArgExprs, NumArgs, Sel, Method, true,
+ lbrac, rbrac, returnType))
+ return true;
+
+ returnType = returnType.getNonReferenceType();
+
+ // If we have the ObjCInterfaceDecl* for the class that is receiving the
+ // message, use that to construct the ObjCMessageExpr. Otherwise pass on the
+ // IdentifierInfo* for the class.
+ // FIXME: need to do a better job handling 'super' usage within a class. For
+ // now, we simply pass the "super" identifier through (which isn't consistent
+ // with instance methods).
+ if (isSuper)
+ return new (Context) ObjCMessageExpr(receiverName, Sel, returnType, Method,
+ lbrac, rbrac, ArgExprs, NumArgs);
+ else
+ return new (Context) ObjCMessageExpr(ClassDecl, Sel, returnType, Method,
+ lbrac, rbrac, ArgExprs, NumArgs);
+}
+
+// ActOnInstanceMessage - used for both unary and keyword messages.
+// ArgExprs is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
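+//
+// Illustrative examples (not from the original comment), given 'id obj':
+//   [obj description]           - unary instance message
+//   [obj performSelector:aSel]  - keyword instance message ('aSel' is a
+//                                 hypothetical SEL variable)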
+Sema::ExprResult Sema::ActOnInstanceMessage(ExprTy *receiver, Selector Sel,
+ SourceLocation lbrac,
+ SourceLocation receiverLoc,
+ SourceLocation rbrac,
+ ExprTy **Args, unsigned NumArgs) {
+ assert(receiver && "missing receiver expression");
+
+ Expr **ArgExprs = reinterpret_cast<Expr **>(Args);
+ Expr *RExpr = static_cast<Expr *>(receiver);
+
+ // If necessary, apply function/array conversion to the receiver.
+ // C99 6.7.5.3p[7,8].
+ DefaultFunctionArrayConversion(RExpr);
+
+ QualType returnType;
+ QualType ReceiverCType =
+ Context.getCanonicalType(RExpr->getType()).getUnqualifiedType();
+
+ // Handle messages to 'super'.
+ if (isa<ObjCSuperExpr>(RExpr)) {
+ ObjCMethodDecl *Method = 0;
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ // If we have an interface in scope, check 'super' methods.
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface())
+ if (ObjCInterfaceDecl *SuperDecl = ClassDecl->getSuperClass()) {
+ Method = SuperDecl->lookupInstanceMethod(Context, Sel);
+
+ if (!Method)
+ // If we have implementations in scope, check "private" methods.
+ Method = LookupPrivateInstanceMethod(Sel, SuperDecl);
+ }
+ }
+
+ if (Method && DiagnoseUseOfDecl(Method, receiverLoc))
+ return true;
+
+ if (CheckMessageArgumentTypes(ArgExprs, NumArgs, Sel, Method, false,
+ lbrac, rbrac, returnType))
+ return true;
+
+ returnType = returnType.getNonReferenceType();
+ return new (Context) ObjCMessageExpr(RExpr, Sel, returnType, Method, lbrac,
+ rbrac, ArgExprs, NumArgs);
+ }
+
+ // Handle messages to id.
+ if (ReceiverCType == Context.getCanonicalType(Context.getObjCIdType()) ||
+ ReceiverCType->isBlockPointerType() ||
+ Context.isObjCNSObjectType(RExpr->getType())) {
+ ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(
+ Sel, SourceRange(lbrac,rbrac));
+ if (!Method)
+ Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(lbrac, rbrac));
+ if (CheckMessageArgumentTypes(ArgExprs, NumArgs, Sel, Method, false,
+ lbrac, rbrac, returnType))
+ return true;
+ returnType = returnType.getNonReferenceType();
+ return new (Context) ObjCMessageExpr(RExpr, Sel, returnType, Method, lbrac,
+ rbrac, ArgExprs, NumArgs);
+ }
+
+ // Handle messages to Class.
+ if (ReceiverCType == Context.getCanonicalType(Context.getObjCClassType())) {
+ ObjCMethodDecl *Method = 0;
+
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
+ // First check the public methods in the class interface.
+ Method = ClassDecl->lookupClassMethod(Context, Sel);
+
+ if (!Method)
+ Method = LookupPrivateClassMethod(Sel, ClassDecl);
+ }
+ if (Method && DiagnoseUseOfDecl(Method, receiverLoc))
+ return true;
+ }
+ if (!Method) {
+ // If not messaging 'self', look for any factory method named 'Sel'.
+ if (!isSelfExpr(RExpr)) {
+ Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(lbrac,rbrac));
+ if (!Method) {
+ // If no class (factory) method was found, check if an _instance_
+ // method of the same name exists in the root class only.
+ Method = LookupInstanceMethodInGlobalPool(
+ Sel, SourceRange(lbrac,rbrac));
+ if (Method)
+ if (const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) {
+ if (ID->getSuperClass())
+ Diag(lbrac, diag::warn_root_inst_method_not_found)
+ << Sel << SourceRange(lbrac, rbrac);
+ }
+ }
+ }
+ }
+ if (CheckMessageArgumentTypes(ArgExprs, NumArgs, Sel, Method, false,
+ lbrac, rbrac, returnType))
+ return true;
+ returnType = returnType.getNonReferenceType();
+ return new (Context) ObjCMessageExpr(RExpr, Sel, returnType, Method, lbrac,
+ rbrac, ArgExprs, NumArgs);
+ }
+
+ ObjCMethodDecl *Method = 0;
+ ObjCInterfaceDecl* ClassDecl = 0;
+
+ // We allow sending a message to a qualified ID ("id<foo>"), which is ok as
+ // long as one of the protocols declares the selector (if not, warn).
+ if (ObjCQualifiedIdType *QIdTy = dyn_cast<ObjCQualifiedIdType>(ReceiverCType)) {
+ // Search protocols for instance methods.
+ for (ObjCQualifiedIdType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *PDecl = *I;
+ if (PDecl && (Method = PDecl->lookupInstanceMethod(Context, Sel)))
+ break;
+ // Since we aren't supporting "Class<foo>", look for a class method.
+ if (PDecl && (Method = PDecl->lookupClassMethod(Context, Sel)))
+ break;
+ }
+ } else if (const ObjCInterfaceType *OCIType =
+ ReceiverCType->getAsPointerToObjCInterfaceType()) {
+ // We allow sending a message to a pointer to an interface (an object).
+
+ ClassDecl = OCIType->getDecl();
+ // FIXME: consider using LookupInstanceMethodInGlobalPool, since it will be
+ // faster than the following method (which can do *many* linear searches).
+ // The idea is to add class info to InstanceMethodPool.
+ Method = ClassDecl->lookupInstanceMethod(Context, Sel);
+
+ if (!Method) {
+ // Search protocol qualifiers.
+ for (ObjCQualifiedInterfaceType::qual_iterator QI = OCIType->qual_begin(),
+ E = OCIType->qual_end(); QI != E; ++QI) {
+ if ((Method = (*QI)->lookupInstanceMethod(Context, Sel)))
+ break;
+ }
+ }
+ if (!Method) {
+ // If we have implementations in scope, check "private" methods.
+ Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+
+ if (!Method && !isSelfExpr(RExpr)) {
+ // If we still haven't found a method, look in the global pool. This
+ // behavior isn't very desirable; however, we need it for GCC
+ // compatibility. FIXME: should we deviate??
+ if (OCIType->qual_empty()) {
+ Method = LookupInstanceMethodInGlobalPool(
+ Sel, SourceRange(lbrac,rbrac));
+ if (Method && !OCIType->getDecl()->isForwardDecl())
+ Diag(lbrac, diag::warn_maynot_respond)
+ << OCIType->getDecl()->getIdentifier()->getName() << Sel;
+ }
+ }
+ }
+ if (Method && DiagnoseUseOfDecl(Method, receiverLoc))
+ return true;
+ } else if (!Context.getObjCIdType().isNull() &&
+ (ReceiverCType->isPointerType() ||
+ (ReceiverCType->isIntegerType() &&
+ ReceiverCType->isScalarType()))) {
+ // Implicitly convert integers and pointers to 'id' but emit a warning.
+ Diag(lbrac, diag::warn_bad_receiver_type)
+ << RExpr->getType() << RExpr->getSourceRange();
+ ImpCastExprToType(RExpr, Context.getObjCIdType());
+ } else {
+ // Reject other random receiver types (e.g. structs).
+ Diag(lbrac, diag::err_bad_receiver_type)
+ << RExpr->getType() << RExpr->getSourceRange();
+ return true;
+ }
+
+ if (Method)
+ DiagnoseSentinelCalls(Method, receiverLoc, ArgExprs, NumArgs);
+ if (CheckMessageArgumentTypes(ArgExprs, NumArgs, Sel, Method, false,
+ lbrac, rbrac, returnType))
+ return true;
+ returnType = returnType.getNonReferenceType();
+ return new (Context) ObjCMessageExpr(RExpr, Sel, returnType, Method, lbrac,
+ rbrac, ArgExprs, NumArgs);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
+//===----------------------------------------------------------------------===//
+
+/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
+/// inheritance hierarchy of 'rProto'.
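+///
+/// Illustrative example (not from the original comment): given
+/// '@protocol P1 @end' and '@protocol P2 <P1> @end', P1 is compatible
+/// with P2 (adopting P2 implies P1), but P2 is not compatible with P1.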
+static bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) {
+ if (lProto == rProto)
+ return true;
+ for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
+ E = rProto->protocol_end(); PI != E; ++PI)
+ if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ return false;
+}
+
+/// ClassImplementsProtocol - Checks that 'lProto' protocol
+/// has been implemented in IDecl class, its super class or categories (if
+/// lookupCategory is true).
+static bool ClassImplementsProtocol(ObjCProtocolDecl *lProto,
+ ObjCInterfaceDecl *IDecl,
+ bool lookupCategory,
+ bool RHSIsQualifiedID = false) {
+
+ // 1st, look up the class.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ IDecl->getReferencedProtocols();
+
+ for (ObjCList<ObjCProtocolDecl>::iterator PI = Protocols.begin(),
+ E = Protocols.end(); PI != E; ++PI) {
+ if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ // This is dubious and is added to be compatible with gcc. In gcc, it is
+ // also permitted to assign a protocol-qualified 'id' type to an LHS object
+ // when a protocol in the qualified LHS is in the list of protocols in the
+ // rhs 'id' object. This, IMO, should be considered a bug.
+ // FIXME: Treat this as an extension, and flag this as an error when GCC
+ // extensions are not enabled.
+ if (RHSIsQualifiedID && ProtocolCompatibleWithProtocol(*PI, lProto))
+ return true;
+ }
+
+ // 2nd, look up the category.
+ if (lookupCategory)
+ for (ObjCCategoryDecl *CDecl = IDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory()) {
+ for (ObjCCategoryDecl::protocol_iterator PI = CDecl->protocol_begin(),
+ E = CDecl->protocol_end(); PI != E; ++PI)
+ if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ }
+
+ // 3rd, look up the super class(es).
+ if (IDecl->getSuperClass())
+ return
+ ClassImplementsProtocol(lProto, IDecl->getSuperClass(), lookupCategory,
+ RHSIsQualifiedID);
+
+ return false;
+}
+
+/// QualifiedIdConformsQualifiedId - compare id<p,...> with id<p1,...>
+/// return true if lhs's protocols conform to rhs's protocol; false
+/// otherwise.
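+///
+/// Illustrative example (not from the original comment): with
+/// lhs = id<NSCopying> and rhs = id<NSCopying, NSCoding>, every protocol
+/// on the lhs is satisfied by one on the rhs, so the check succeeds;
+/// the reverse direction fails.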
+bool Sema::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) {
+ if (lhs->isObjCQualifiedIdType() && rhs->isObjCQualifiedIdType())
+ return ObjCQualifiedIdTypesAreCompatible(lhs, rhs, false);
+ return false;
+}
+
+/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
+/// ObjCQualifiedIDType.
+/// FIXME: Move to ASTContext::typesAreCompatible() and friends.
+bool Sema::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
+ bool compare) {
+ // Allow id<P..> and an 'id' or void* type in all cases.
+ if (const PointerType *PT = lhs->getAsPointerType()) {
+ QualType PointeeTy = PT->getPointeeType();
+ if (PointeeTy->isVoidType() ||
+ Context.isObjCIdStructType(PointeeTy) ||
+ Context.isObjCClassStructType(PointeeTy))
+ return true;
+ } else if (const PointerType *PT = rhs->getAsPointerType()) {
+ QualType PointeeTy = PT->getPointeeType();
+ if (PointeeTy->isVoidType() ||
+ Context.isObjCIdStructType(PointeeTy) ||
+ Context.isObjCClassStructType(PointeeTy))
+ return true;
+ }
+
+ if (const ObjCQualifiedIdType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
+ const ObjCQualifiedIdType *rhsQID = rhs->getAsObjCQualifiedIdType();
+ const ObjCQualifiedInterfaceType *rhsQI = 0;
+ QualType rtype;
+
+ if (!rhsQID) {
+ // Not comparing two ObjCQualifiedIdType's?
+ if (!rhs->isPointerType()) return false;
+
+ rtype = rhs->getAsPointerType()->getPointeeType();
+ rhsQI = rtype->getAsObjCQualifiedInterfaceType();
+ if (rhsQI == 0) {
+ // If the RHS is an unqualified interface pointer "NSString*",
+ // make sure we check the class hierarchy.
+ if (const ObjCInterfaceType *IT = rtype->getAsObjCInterfaceType()) {
+ ObjCInterfaceDecl *rhsID = IT->getDecl();
+ for (ObjCQualifiedIdType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (!ClassImplementsProtocol(*I, rhsID, true))
+ return false;
+ }
+ return true;
+ }
+ }
+ }
+
+ ObjCQualifiedIdType::qual_iterator RHSProtoI, RHSProtoE;
+ if (rhsQI) { // We have a qualified interface (e.g. "NSObject<Proto> *").
+ RHSProtoI = rhsQI->qual_begin();
+ RHSProtoE = rhsQI->qual_end();
+ } else if (rhsQID) { // We have a qualified id (e.g. "id<Proto> *").
+ RHSProtoI = rhsQID->qual_begin();
+ RHSProtoE = rhsQID->qual_end();
+ } else {
+ return false;
+ }
+
+ for (ObjCQualifiedIdType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *lhsProto = *I;
+ bool match = false;
+
+ // For each lhs protocol, check whether some rhs protocol is compatible
+ // with it, either directly or through protocol inheritance.
+ for (; RHSProtoI != RHSProtoE; ++RHSProtoI) {
+ ObjCProtocolDecl *rhsProto = *RHSProtoI;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (rhsQI) {
+ // If the RHS is a qualified interface pointer "NSString<P>*",
+ // make sure we check the class hierarchy.
+ if (const ObjCInterfaceType *IT = rtype->getAsObjCInterfaceType()) {
+ ObjCInterfaceDecl *rhsID = IT->getDecl();
+ for (ObjCQualifiedIdType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (ClassImplementsProtocol(*I, rhsID, true)) {
+ match = true;
+ break;
+ }
+ }
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+ }
+
+ const ObjCQualifiedIdType *rhsQID = rhs->getAsObjCQualifiedIdType();
+ assert(rhsQID && "One of the LHS/RHS should be id<x>");
+
+ if (!lhs->isPointerType())
+ return false;
+
+ QualType ltype = lhs->getAsPointerType()->getPointeeType();
+ if (const ObjCQualifiedInterfaceType *lhsQI =
+ ltype->getAsObjCQualifiedInterfaceType()) {
+ ObjCQualifiedIdType::qual_iterator LHSProtoI = lhsQI->qual_begin();
+ ObjCQualifiedIdType::qual_iterator LHSProtoE = lhsQI->qual_end();
+ for (; LHSProtoI != LHSProtoE; ++LHSProtoI) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = *LHSProtoI;
+ for (ObjCQualifiedIdType::qual_iterator I = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *rhsProto = *I;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ return true;
+ }
+
+ if (const ObjCInterfaceType *IT = ltype->getAsObjCInterfaceType()) {
+ // for static type vs. qualified 'id' type, check that class implements
+ // all of 'id's protocols.
+ ObjCInterfaceDecl *lhsID = IT->getDecl();
+ for (ObjCQualifiedIdType::qual_iterator I = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); I != E; ++I) {
+ if (!ClassImplementsProtocol(*I, lhsID, compare, true))
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
diff --git a/lib/Sema/SemaInherit.cpp b/lib/Sema/SemaInherit.cpp
new file mode 100644
index 0000000..1b968f0
--- /dev/null
+++ b/lib/Sema/SemaInherit.cpp
@@ -0,0 +1,344 @@
+//===---- SemaInherit.cpp - C++ Inheritance ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ inheritance semantics,
+// including searching the inheritance hierarchy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SemaInherit.h"
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
+#include <algorithm>
+#include <memory>
+#include <set>
+#include <string>
+
+using namespace clang;
+
+/// \brief Computes the set of declarations referenced by these base
+/// paths.
+void BasePaths::ComputeDeclsFound() {
+ assert(NumDeclsFound == 0 && !DeclsFound &&
+ "Already computed the set of declarations");
+
+ std::set<NamedDecl *> Decls;
+ for (BasePaths::paths_iterator Path = begin(), PathEnd = end();
+ Path != PathEnd; ++Path)
+ Decls.insert(*Path->Decls.first);
+
+ NumDeclsFound = Decls.size();
+ DeclsFound = new NamedDecl * [NumDeclsFound];
+ std::copy(Decls.begin(), Decls.end(), DeclsFound);
+}
+
+BasePaths::decl_iterator BasePaths::found_decls_begin() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound;
+}
+
+BasePaths::decl_iterator BasePaths::found_decls_end() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound + NumDeclsFound;
+}
+
+/// isAmbiguous - Determines whether the set of paths provided is
+/// ambiguous, i.e., there are two or more paths that refer to
+/// different base class subobjects of the same type. BaseType must be
+/// an unqualified, canonical class type.
+bool BasePaths::isAmbiguous(QualType BaseType) {
+ assert(BaseType->isCanonical() && "Base type must be the canonical type");
+ assert(BaseType.getCVRQualifiers() == 0 && "Base type must be unqualified");
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
+}
+
+/// clear - Clear out all prior path information.
+void BasePaths::clear() {
+ Paths.clear();
+ ClassSubobjects.clear();
+ ScratchPath.clear();
+ DetectedVirtual = 0;
+}
+
+/// @brief Swaps the contents of this BasePaths structure with the
+/// contents of Other.
+void BasePaths::swap(BasePaths &Other) {
+ std::swap(Origin, Other.Origin);
+ Paths.swap(Other.Paths);
+ ClassSubobjects.swap(Other.ClassSubobjects);
+ std::swap(FindAmbiguities, Other.FindAmbiguities);
+ std::swap(RecordPaths, Other.RecordPaths);
+ std::swap(DetectVirtual, Other.DetectVirtual);
+ std::swap(DetectedVirtual, Other.DetectedVirtual);
+}
+
+/// IsDerivedFrom - Determine whether the type Derived is derived from
+/// the type Base, ignoring qualifiers on Base and Derived. This
+/// routine does not assess whether an actual conversion from a
+/// Derived* to a Base* is legal, because it does not account for
+/// ambiguous conversions or conversions to private/protected bases.
+bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
+ BasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ return IsDerivedFrom(Derived, Base, Paths);
+}
+
+/// IsDerivedFrom - Determine whether the type Derived is derived from
+/// the type Base, ignoring qualifiers on Base and Derived. This
+/// routine does not assess whether an actual conversion from a
+/// Derived* to a Base* is legal, because it does not account for
+/// ambiguous conversions or conversions to private/protected
+/// bases. This routine will use Paths to determine if there are
+/// ambiguous paths (if @c Paths.isFindingAmbiguities()) and record
+/// information about all of the paths (if @c Paths.isRecordingPaths()).
+bool Sema::IsDerivedFrom(QualType Derived, QualType Base, BasePaths &Paths) {
+ Derived = Context.getCanonicalType(Derived).getUnqualifiedType();
+ Base = Context.getCanonicalType(Base).getUnqualifiedType();
+
+ if (!Derived->isRecordType() || !Base->isRecordType())
+ return false;
+
+ if (Derived == Base)
+ return false;
+
+ Paths.setOrigin(Derived);
+ return LookupInBases(cast<CXXRecordDecl>(Derived->getAsRecordType()->getDecl()),
+ MemberLookupCriteria(Base), Paths);
+}
+
+/// LookupInBases - Look for something that meets the specified
+/// Criteria within the base classes of Class (or any of its base
+/// classes, transitively). This routine populates BasePaths with the
+/// list of paths that one can take to find the entity that meets the
+/// search criteria, and returns true if any such entity is found. The
+/// various options passed to the BasePath constructor will affect the
+/// behavior of this lookup, e.g., whether it finds ambiguities,
+/// records paths, or attempts to detect the use of virtual base
+/// classes.
+bool Sema::LookupInBases(CXXRecordDecl *Class,
+ const MemberLookupCriteria& Criteria,
+ BasePaths &Paths) {
+ bool FoundPath = false;
+
+ for (CXXRecordDecl::base_class_const_iterator BaseSpec = Class->bases_begin(),
+ BaseSpecEnd = Class->bases_end();
+ BaseSpec != BaseSpecEnd; ++BaseSpec) {
+ // Find the record of the base class subobjects for this type.
+ QualType BaseType = Context.getCanonicalType(BaseSpec->getType());
+ BaseType = BaseType.getUnqualifiedType();
+
+ // Determine whether we need to visit this base class at all,
+ // updating the count of subobjects appropriately.
+ std::pair<bool, unsigned>& Subobjects = Paths.ClassSubobjects[BaseType];
+ bool VisitBase = true;
+ bool SetVirtual = false;
+ if (BaseSpec->isVirtual()) {
+ VisitBase = !Subobjects.first;
+ Subobjects.first = true;
+ if (Paths.isDetectingVirtual() && Paths.DetectedVirtual == 0) {
+ // If this is the first virtual we find, remember it. If it turns out
+ // there is no base path here, we'll reset it later.
+ Paths.DetectedVirtual = BaseType->getAsRecordType();
+ SetVirtual = true;
+ }
+ } else
+ ++Subobjects.second;
+
+ if (Paths.isRecordingPaths()) {
+ // Add this base specifier to the current path.
+ BasePathElement Element;
+ Element.Base = &*BaseSpec;
+ Element.Class = Class;
+ if (BaseSpec->isVirtual())
+ Element.SubobjectNumber = 0;
+ else
+ Element.SubobjectNumber = Subobjects.second;
+ Paths.ScratchPath.push_back(Element);
+ }
+
+ CXXRecordDecl *BaseRecord
+ = cast<CXXRecordDecl>(BaseSpec->getType()->getAsRecordType()->getDecl());
+
+ // Either look at the base class type or look into the base class
+ // type to see if we've found a member that meets the search
+ // criteria.
+ bool FoundPathToThisBase = false;
+ switch (Criteria.Kind) {
+ case MemberLookupCriteria::LK_Base:
+ FoundPathToThisBase
+ = (Context.getCanonicalType(BaseSpec->getType()) == Criteria.Base);
+ break;
+ case MemberLookupCriteria::LK_NamedMember:
+ Paths.ScratchPath.Decls = BaseRecord->lookup(Context, Criteria.Name);
+ while (Paths.ScratchPath.Decls.first != Paths.ScratchPath.Decls.second) {
+ if (isAcceptableLookupResult(*Paths.ScratchPath.Decls.first,
+ Criteria.NameKind, Criteria.IDNS)) {
+ FoundPathToThisBase = true;
+ break;
+ }
+ ++Paths.ScratchPath.Decls.first;
+ }
+ break;
+ case MemberLookupCriteria::LK_OverriddenMember:
+ Paths.ScratchPath.Decls =
+ BaseRecord->lookup(Context, Criteria.Method->getDeclName());
+ while (Paths.ScratchPath.Decls.first != Paths.ScratchPath.Decls.second) {
+ if (CXXMethodDecl *MD =
+ dyn_cast<CXXMethodDecl>(*Paths.ScratchPath.Decls.first)) {
+ OverloadedFunctionDecl::function_iterator MatchedDecl;
+ if (MD->isVirtual() &&
+ !IsOverload(Criteria.Method, MD, MatchedDecl)) {
+ FoundPathToThisBase = true;
+ break;
+ }
+ }
+
+ ++Paths.ScratchPath.Decls.first;
+ }
+ break;
+ }
+
+ if (FoundPathToThisBase) {
+ // We've found a path that terminates at this base.
+ FoundPath = true;
+ if (Paths.isRecordingPaths()) {
+ // We have a path. Make a copy of it before moving on.
+ Paths.Paths.push_back(Paths.ScratchPath);
+ } else if (!Paths.isFindingAmbiguities()) {
+ // We found a path and we don't care about ambiguities;
+ // return immediately.
+ return FoundPath;
+ }
+ }
+ // C++ [class.member.lookup]p2:
+ // A member name f in one sub-object B hides a member name f in
+ // a sub-object A if A is a base class sub-object of B. Any
+ // declarations that are so hidden are eliminated from
+ // consideration.
+ else if (VisitBase && LookupInBases(BaseRecord, Criteria, Paths)) {
+ // There is a path to a base class that meets the criteria. If we're not
+ // collecting paths or finding ambiguities, we're done.
+ FoundPath = true;
+ if (!Paths.isFindingAmbiguities())
+ return FoundPath;
+ }
+
+ // Pop this base specifier off the current path (if we're
+ // collecting paths).
+ if (Paths.isRecordingPaths())
+ Paths.ScratchPath.pop_back();
+ // If we set a virtual earlier, and this isn't a path, forget it again.
+ if (SetVirtual && !FoundPath) {
+ Paths.DetectedVirtual = 0;
+ }
+ }
+
+ return FoundPath;
+}
+
+/// CheckDerivedToBaseConversion - Check whether the Derived-to-Base
+/// conversion (where Derived and Base are class types) is
+/// well-formed, meaning that the conversion is unambiguous (and
+/// that all of the base classes are accessible). Returns true
+/// and emits a diagnostic if the code is ill-formed, returns false
+/// otherwise. Loc is the location where this routine should point to
+/// if there is an error, and Range is the source range to highlight
+/// if there is an error.
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ unsigned AmbigiousBaseConvID,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName Name) {
+ // First, determine whether the path from Derived to Base is
+ // ambiguous. This is slightly more expensive than checking whether
+ // the Derived to Base conversion exists, because here we need to
+ // explore multiple paths to determine if there is an ambiguity.
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ bool DerivationOkay = IsDerivedFrom(Derived, Base, Paths);
+ assert(DerivationOkay &&
+ "Can only be used with a derived-to-base conversion");
+ (void)DerivationOkay;
+
+ if (!Paths.isAmbiguous(Context.getCanonicalType(Base).getUnqualifiedType())) {
+ // Check that the base class can be accessed.
+ return CheckBaseClassAccess(Derived, Base, InaccessibleBaseID, Paths, Loc,
+ Name);
+ }
+
+ // We know that the derived-to-base conversion is ambiguous, and
+ // we're going to produce a diagnostic. Perform the derived-to-base
+ // search just one more time to compute all of the possible paths so
+ // that we can print them out. This is more expensive than any of
+ // the previous derived-to-base checks we've done, but at this point
+ // performance isn't as much of an issue.
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = IsDerivedFrom(Derived, Base, Paths);
+ assert(StillOkay && "Can only be used with a derived-to-base conversion");
+ (void)StillOkay;
+
+ // Build up a textual representation of the ambiguous paths, e.g.,
+ // D -> B -> A, that will be used to illustrate the ambiguous
+ // conversions in the diagnostic. We only print one of the paths
+ // to each base class subobject.
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+
+ Diag(Loc, AmbigiousBaseConvID)
+ << Derived << Base << PathDisplayStr << Range << Name;
+ return true;
+}
+
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ SourceLocation Loc, SourceRange Range) {
+ return CheckDerivedToBaseConversion(Derived, Base,
+ diag::err_conv_to_inaccessible_base,
+ diag::err_ambiguous_derived_to_base_conv,
+ Loc, Range, DeclarationName());
+}
+
+
+/// @brief Builds a string representing ambiguous paths from a
+/// specific derived class to different subobjects of the same base
+/// class.
+///
+/// This function builds a string that can be used in error messages
+/// to show the different paths that one can take through the
+/// inheritance hierarchy to go from the derived class to different
+/// subobjects of a base class. The result looks something like this:
+/// @code
+/// struct D -> struct B -> struct A
+/// struct D -> struct C -> struct A
+/// @endcode
+std::string Sema::getAmbiguousPathsDisplayString(BasePaths &Paths) {
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (BasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ PathDisplayStr += Paths.getOrigin().getAsString();
+ for (BasePath::const_iterator Element = Path->begin();
+ Element != Path->end(); ++Element)
+ PathDisplayStr += " -> " + Element->Base->getType().getAsString();
+ }
+ }
+
+ return PathDisplayStr;
+}
diff --git a/lib/Sema/SemaInherit.h b/lib/Sema/SemaInherit.h
new file mode 100644
index 0000000..b1e791a
--- /dev/null
+++ b/lib/Sema/SemaInherit.h
@@ -0,0 +1,248 @@
+//===------ SemaInherit.h - C++ Inheritance ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema data structures that help analyse C++
+// inheritance semantics, including searching the inheritance
+// hierarchy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_INHERIT_H
+#define LLVM_CLANG_SEMA_INHERIT_H
+
+#include "Sema.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/ADT/SmallVector.h"
+#include <list>
+#include <map>
+
+namespace clang {
+ class CXXBaseSpecifier;
+
+ /// BasePathElement - An element in a path from a derived class to a
+ /// base class. Each step in the path references the link from a
+ /// derived class to one of its direct base classes, along with a
+ /// base "number" that identifies which base subobject of the
+ /// original derived class we are referencing.
+ struct BasePathElement {
+ /// Base - The base specifier that states the link from a derived
+ /// class to a base class, which will be followed by this base
+ /// path element.
+ const CXXBaseSpecifier *Base;
+
+ /// Class - The record decl of the class that the base is a base of.
+ const CXXRecordDecl *Class;
+
+ /// SubobjectNumber - Identifies which base class subobject (of type
+ /// @c Base->getType()) this base path element refers to. This
+ /// value is only valid if @c !Base->isVirtual(), because there
+ /// is no base numbering for the zero or one virtual bases of a
+ /// given type.
+ int SubobjectNumber;
+ };
+
+ /// BasePath - Represents a path from a specific derived class
+ /// (which is not represented as part of the path) to a particular
+ /// (direct or indirect) base class subobject that contains some
+ /// number of declarations with the same name. Individual elements
+ /// in the path are described by the BasePathElement structure,
+ /// which captures both the link from a derived class to one of its
+ /// direct bases and identification describing which base class
+ /// subobject is being used.
+ struct BasePath : public llvm::SmallVector<BasePathElement, 4> {
+ /// Decls - The set of declarations found inside this base class
+ /// subobject.
+ DeclContext::lookup_result Decls;
+ };
+
+ /// BasePaths - Represents the set of paths from a derived class to
+ /// one of its (direct or indirect) bases. For example, given the
+ /// following class hierarchy:
+ ///
+ /// @code
+ /// class A { };
+ /// class B : public A { };
+ /// class C : public A { };
+ /// class D : public B, public C{ };
+ /// @endcode
+ ///
+ /// There are two potential BasePaths to represent paths from D to a
+ /// base subobject of type A. One path is (D,0) -> (B,0) -> (A,0)
+ /// and another is (D,0)->(C,0)->(A,1). These two paths actually
+ /// refer to two different base class subobjects of the same type,
+ /// so the BasePaths object refers to an ambiguous path. On the
+ /// other hand, consider the following class hierarchy:
+ ///
+ /// @code
+ /// class A { };
+ /// class B : public virtual A { };
+ /// class C : public virtual A { };
+ /// class D : public B, public C{ };
+ /// @endcode
+ ///
+ /// Here, there are two potential BasePaths again, (D, 0) -> (B, 0)
+ /// -> (A,v) and (D, 0) -> (C, 0) -> (A, v), but since both of them
+ /// refer to the same base class subobject of type A (the virtual
+ /// one), there is no ambiguity.
+ class BasePaths {
+ /// Origin - The type from which this search originated.
+ QualType Origin;
+
+ /// Paths - The actual set of paths that can be taken from the
+ /// derived class to the same base class.
+ std::list<BasePath> Paths;
+
+ /// ClassSubobjects - Records the class subobjects for each class
+ /// type that we've seen. The first element in the pair says
+ /// whether we found a path to a virtual base for that class type,
+ /// while the element contains the number of non-virtual base
+ /// class subobjects for that class type. The key of the map is
+ /// the cv-unqualified canonical type of the base class subobject.
+ std::map<QualType, std::pair<bool, unsigned>, QualTypeOrdering>
+ ClassSubobjects;
+
+ /// FindAmbiguities - Whether Sema::IsDerivedFrom should try to find
+ /// ambiguous paths while it is looking for a path from a derived
+ /// type to a base type.
+ bool FindAmbiguities;
+
+ /// RecordPaths - Whether Sema::IsDerivedFrom should record paths
+ /// while it is determining whether there are paths from a derived
+ /// type to a base type.
+ bool RecordPaths;
+
+ /// DetectVirtual - Whether Sema::IsDerivedFrom should abort the search
+ /// if it finds a path that goes across a virtual base. The virtual class
+ /// is also recorded.
+ bool DetectVirtual;
+
+ /// ScratchPath - A BasePath that is used by Sema::IsDerivedFrom
+ /// to help build the set of paths.
+ BasePath ScratchPath;
+
+ /// DetectedVirtual - The base class that is virtual.
+ const RecordType *DetectedVirtual;
+
+ /// \brief Array of the declarations that have been found. This
+ /// array is constructed only if needed, e.g., to iterate over the
+ /// results within LookupResult.
+ NamedDecl **DeclsFound;
+ unsigned NumDeclsFound;
+
+ friend class Sema;
+
+ void ComputeDeclsFound();
+
+ public:
+ typedef std::list<BasePath>::const_iterator paths_iterator;
+ typedef NamedDecl **decl_iterator;
+
+ /// BasePaths - Construct a new BasePaths structure to record the
+ /// paths for a derived-to-base search.
+ explicit BasePaths(bool FindAmbiguities = true,
+ bool RecordPaths = true,
+ bool DetectVirtual = true)
+ : FindAmbiguities(FindAmbiguities), RecordPaths(RecordPaths),
+ DetectVirtual(DetectVirtual), DetectedVirtual(0), DeclsFound(0),
+ NumDeclsFound(0)
+ {}
+
+ ~BasePaths() { delete [] DeclsFound; }
+
+ paths_iterator begin() const { return Paths.begin(); }
+ paths_iterator end() const { return Paths.end(); }
+
+ BasePath& front() { return Paths.front(); }
+ const BasePath& front() const { return Paths.front(); }
+
+ decl_iterator found_decls_begin();
+ decl_iterator found_decls_end();
+
+ bool isAmbiguous(QualType BaseType);
+
+ /// isFindingAmbiguities - Whether we are finding multiple paths
+ /// to detect ambiguities.
+ bool isFindingAmbiguities() const { return FindAmbiguities; }
+
+ /// isRecordingPaths - Whether we are recording paths.
+ bool isRecordingPaths() const { return RecordPaths; }
+
+ /// setRecordingPaths - Specify whether we should be recording
+ /// paths or not.
+ void setRecordingPaths(bool RP) { RecordPaths = RP; }
+
+ /// isDetectingVirtual - Whether we are detecting virtual bases.
+ bool isDetectingVirtual() const { return DetectVirtual; }
+
+ /// getDetectedVirtual - The virtual base discovered on the path.
+ const RecordType* getDetectedVirtual() const {
+ return DetectedVirtual;
+ }
+
+ /// @brief Retrieve the type from which this base-paths search
+ /// began.
+ QualType getOrigin() const { return Origin; }
+ void setOrigin(QualType Type) { Origin = Type; }
+
+ void clear();
+
+ void swap(BasePaths &Other);
+ };
+
+ /// MemberLookupCriteria - Criteria for performing lookup of a
+ /// member of a C++ class. Objects of this type are used to direct
+ /// Sema::LookupCXXClassMember.
+ struct MemberLookupCriteria {
+ /// LookupKind - the kind of lookup we're doing.
+ enum LookupKind {
+ LK_Base,
+ LK_NamedMember,
+ LK_OverriddenMember
+ };
+
+ /// MemberLookupCriteria - Constructs member lookup criteria to
+ /// search for a base class of type Base.
+ explicit MemberLookupCriteria(QualType Base)
+ : Kind(LK_Base), Base(Base) { }
+
+ /// MemberLookupCriteria - Constructs member lookup criteria to
+ /// search for a class member with the given Name.
+ explicit MemberLookupCriteria(DeclarationName Name,
+ Sema::LookupNameKind NameKind,
+ unsigned IDNS)
+ : Kind(LK_NamedMember), Name(Name), NameKind(NameKind), IDNS(IDNS) { }
+
+ explicit MemberLookupCriteria(CXXMethodDecl *MD)
+ : Kind(LK_OverriddenMember), Method(MD) { }
+
+ /// Kind - The kind of lookup we're doing.
+ /// LK_Base if we are looking for a base class (whose
+ /// type is Base). LK_NamedMember if we are looking for a named member of
+ /// the class (with the name Name).
+ LookupKind Kind;
+
+ /// Base - The type of the base class we're searching for, if
+ /// Kind is LK_Base.
+ QualType Base;
+
+ /// Name - The name of the member we're searching for, if
+ /// Kind is LK_NamedMember.
+ DeclarationName Name;
+
+ Sema::LookupNameKind NameKind;
+ unsigned IDNS;
+
+ CXXMethodDecl *Method;
+ };
+}
+
+#endif
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
new file mode 100644
index 0000000..4e0eb1d
--- /dev/null
+++ b/lib/Sema/SemaInit.cpp
@@ -0,0 +1,1784 @@
+//===--- SemaInit.cpp - Semantic Analysis for Initializers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for initializers. The main entry
+// point is Sema::CheckInitList(), but all of the work is performed
+// within the InitListChecker class.
+//
+// This file also implements Sema::CheckInitializerTypes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/Parse/Designator.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Sema Initialization Checking
+//===----------------------------------------------------------------------===//
+
+static Expr *IsStringInit(Expr *Init, QualType DeclType, ASTContext &Context) {
+ const ArrayType *AT = Context.getAsArrayType(DeclType);
+ if (!AT) return 0;
+
+ if (!isa<ConstantArrayType>(AT) && !isa<IncompleteArrayType>(AT))
+ return 0;
+
+ // See if this is a string literal or @encode.
+ Init = Init->IgnoreParens();
+
+ // Handle @encode, which is a narrow string.
+ if (isa<ObjCEncodeExpr>(Init) && AT->getElementType()->isCharType())
+ return Init;
+
+ // Otherwise we can only handle string literals.
+ StringLiteral *SL = dyn_cast<StringLiteral>(Init);
+ if (SL == 0) return 0;
+
+ QualType ElemTy = Context.getCanonicalType(AT->getElementType());
+ // char array can be initialized with a narrow string.
+ // Only allow char x[] = "foo"; not char x[] = L"foo";
+ if (!SL->isWide())
+ return ElemTy->isCharType() ? Init : 0;
+
+ // wchar_t array can be initialized with a wide string: C99 6.7.8p15 (with
+ // correction from DR343): "An array with element type compatible with a
+ // qualified or unqualified version of wchar_t may be initialized by a wide
+ // string literal, optionally enclosed in braces."
+ if (Context.typesAreCompatible(Context.getWCharType(),
+ ElemTy.getUnqualifiedType()))
+ return Init;
+
+ return 0;
+}
+
+static bool CheckSingleInitializer(Expr *&Init, QualType DeclType,
+ bool DirectInit, Sema &S) {
+ // Get the type before calling CheckSingleAssignmentConstraints(), since
+ // it can promote the expression.
+ QualType InitType = Init->getType();
+
+ if (S.getLangOptions().CPlusPlus) {
+ // FIXME: I dislike this error message. A lot.
+ if (S.PerformImplicitConversion(Init, DeclType, "initializing", DirectInit))
+ return S.Diag(Init->getSourceRange().getBegin(),
+ diag::err_typecheck_convert_incompatible)
+ << DeclType << Init->getType() << "initializing"
+ << Init->getSourceRange();
+ return false;
+ }
+
+ Sema::AssignConvertType ConvTy =
+ S.CheckSingleAssignmentConstraints(DeclType, Init);
+ return S.DiagnoseAssignmentResult(ConvTy, Init->getLocStart(), DeclType,
+ InitType, Init, "initializing");
+}
+
+static void CheckStringInit(Expr *Str, QualType &DeclT, Sema &S) {
+ // Get the length of the string as parsed.
+ uint64_t StrLength =
+ cast<ConstantArrayType>(Str->getType())->getSize().getZExtValue();
+
+
+ const ArrayType *AT = S.Context.getAsArrayType(DeclT);
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ // C99 6.7.8p14. We have an array of character type with unknown size
+ // being initialized to a string literal.
+ llvm::APSInt ConstVal(32);
+ ConstVal = StrLength;
+ // Return a new array type (C99 6.7.8p22).
+ DeclT = S.Context.getConstantArrayType(IAT->getElementType(), ConstVal,
+ ArrayType::Normal, 0);
+ return;
+ }
+
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
+
+ // C99 6.7.8p14. We have an array of character type with known size. However,
+ // the size may be smaller or larger than the string we are initializing.
+ // FIXME: Avoid truncation for 64-bit length strings.
+ if (StrLength-1 > CAT->getSize().getZExtValue())
+ S.Diag(Str->getSourceRange().getBegin(),
+ diag::warn_initializer_string_for_char_array_too_long)
+ << Str->getSourceRange();
+
+ // Set the type to the actual size that we are initializing. If we have
+ // something like:
+ // char x[1] = "foo";
+ // then this will set the string literal's type to char[1].
+ Str->setType(DeclT);
+}
+
+bool Sema::CheckInitializerTypes(Expr *&Init, QualType &DeclType,
+ SourceLocation InitLoc,
+ DeclarationName InitEntity, bool DirectInit) {
+ if (DeclType->isDependentType() ||
+ Init->isTypeDependent() || Init->isValueDependent())
+ return false;
+
+ // C++ [dcl.init.ref]p1:
+ // A variable declared to be a T& or T&&, that is "reference to type T"
+ // (8.3.2), shall be initialized by an object, or function, of
+ // type T or by an object that can be converted into a T.
+ if (DeclType->isReferenceType())
+ return CheckReferenceInit(Init, DeclType, 0, false, DirectInit);
+
+ // C99 6.7.8p3: The type of the entity to be initialized shall be an array
+ // of unknown size ("[]") or an object type that is not a variable array type.
+ if (const VariableArrayType *VAT = Context.getAsVariableArrayType(DeclType))
+ return Diag(InitLoc, diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+
+ InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
+ if (!InitList) {
+ // FIXME: Handle wide strings
+ if (Expr *Str = IsStringInit(Init, DeclType, Context)) {
+ CheckStringInit(Str, DeclType, *this);
+ return false;
+ }
+
+ // C++ [dcl.init]p14:
+ // -- If the destination type is a (possibly cv-qualified) class
+ // type:
+ if (getLangOptions().CPlusPlus && DeclType->isRecordType()) {
+ QualType DeclTypeC = Context.getCanonicalType(DeclType);
+ QualType InitTypeC = Context.getCanonicalType(Init->getType());
+
+ // -- If the initialization is direct-initialization, or if it is
+ // copy-initialization where the cv-unqualified version of the
+ // source type is the same class as, or a derived class of, the
+ // class of the destination, constructors are considered.
+ if ((DeclTypeC.getUnqualifiedType() == InitTypeC.getUnqualifiedType()) ||
+ IsDerivedFrom(InitTypeC, DeclTypeC)) {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(DeclType->getAsRecordType()->getDecl());
+
+ // No need to make a CXXConstructExpr if both the ctor and dtor are
+ // trivial.
+ if (RD->hasTrivialConstructor() && RD->hasTrivialDestructor())
+ return false;
+
+ CXXConstructorDecl *Constructor
+ = PerformInitializationByConstructor(DeclType, &Init, 1,
+ InitLoc, Init->getSourceRange(),
+ InitEntity,
+ DirectInit? IK_Direct : IK_Copy);
+ if (!Constructor)
+ return true;
+
+ Init = CXXConstructExpr::Create(Context, DeclType, Constructor, false,
+ &Init, 1);
+ return false;
+ }
+
+ // -- Otherwise (i.e., for the remaining copy-initialization
+ // cases), user-defined conversion sequences that can
+ // convert from the source type to the destination type or
+ // (when a conversion function is used) to a derived class
+ // thereof are enumerated as described in 13.3.1.4, and the
+ // best one is chosen through overload resolution
+ // (13.3). If the conversion cannot be done or is
+ // ambiguous, the initialization is ill-formed. The
+ // function selected is called with the initializer
+ // expression as its argument; if the function is a
+ // constructor, the call initializes a temporary of the
+ // destination type.
+ // FIXME: We're pretending to do copy elision here; return to this when we
+ // have ASTs for such things.
+ if (!PerformImplicitConversion(Init, DeclType, "initializing"))
+ return false;
+
+ if (InitEntity)
+ return Diag(InitLoc, diag::err_cannot_initialize_decl)
+ << InitEntity << (int)(Init->isLvalue(Context) == Expr::LV_Valid)
+ << Init->getType() << Init->getSourceRange();
+ else
+ return Diag(InitLoc, diag::err_cannot_initialize_decl_noname)
+ << DeclType << (int)(Init->isLvalue(Context) == Expr::LV_Valid)
+ << Init->getType() << Init->getSourceRange();
+ }
+
+ // C99 6.7.8p16.
+ if (DeclType->isArrayType())
+ return Diag(Init->getLocStart(), diag::err_array_init_list_required)
+ << Init->getSourceRange();
+
+ return CheckSingleInitializer(Init, DeclType, DirectInit, *this);
+ }
+
+ bool hadError = CheckInitList(InitList, DeclType);
+ Init = InitList;
+ return hadError;
+}
+
+//===----------------------------------------------------------------------===//
+// Semantic checking for initializer lists.
+//===----------------------------------------------------------------------===//
+
+/// @brief Semantic checking for initializer lists.
+///
+/// The InitListChecker class contains a set of routines that each
+/// handle the initialization of a certain kind of entity, e.g.,
+/// arrays, vectors, struct/union types, scalars, etc. The
+/// InitListChecker itself performs a recursive walk of the subobject
+/// structure of the type to be initialized, while stepping through
+/// the initializer list one element at a time. The IList and Index
+/// parameters to each of the Check* routines contain the active
+/// (syntactic) initializer list and the index into that initializer
+/// list that represents the current initializer. Each routine is
+/// responsible for moving that Index forward as it consumes elements.
+///
+/// Each Check* routine also has StructuredList/StructuredIndex
+/// arguments, which contain the current "structured" (semantic)
+/// initializer list and the index into that initializer list where we
+/// are copying initializers as we map them over to the semantic
+/// list. Once we have completed our recursive walk of the subobject
+/// structure, we will have constructed a full semantic initializer
+/// list.
+///
+/// C99 designators cause changes in the initializer list traversal,
+/// because they make the initialization "jump" into a specific
+/// subobject and then continue the initialization from that
+/// point. CheckDesignatedInitializer() recursively steps into the
+/// designated subobject and manages backing out the recursion to
+/// initialize the subobjects after the one designated.
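+///
+/// For example (a purely illustrative C99 snippet), given
+///
+///   struct Point { int x, y; };
+///   struct Point pts[3] = { [1].y = 2, 3, [0] = { 7, 8 } };
+///
+/// the designator [1].y jumps to the subobject pts[1].y, the
+/// undesignated initializer 3 that follows continues with the next
+/// subobject (pts[2], whose x member it initializes via brace
+/// elision), and the designator [0] jumps back to re-initialize
+/// pts[0]. Any subobjects left uninitialized are later filled in with
+/// implicit value-initializations.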
+namespace {
+class InitListChecker {
+ Sema &SemaRef;
+ bool hadError;
+ std::map<InitListExpr *, InitListExpr *> SyntacticToSemantic;
+ InitListExpr *FullyStructuredList;
+
+ void CheckImplicitInitList(InitListExpr *ParentIList, QualType T,
+ unsigned &Index, InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckExplicitInitList(InitListExpr *IList, QualType &T,
+ unsigned &Index, InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckListElementTypes(InitListExpr *IList, QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckSubElementType(InitListExpr *IList, QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckScalarType(InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckReferenceType(InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckVectorType(InitListExpr *IList, QualType DeclType, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckStructUnionTypes(InitListExpr *IList, QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckArrayType(InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ bool CheckDesignatedInitializer(InitListExpr *IList, DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject);
+ InitListExpr *getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange);
+ void UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr);
+ int numArrayElements(QualType DeclType);
+ int numStructUnionElements(QualType DeclType);
+
+ void FillInValueInitializations(InitListExpr *ILE);
+public:
+ InitListChecker(Sema &S, InitListExpr *IL, QualType &T);
+ bool HadError() { return hadError; }
+
+  /// @brief Retrieves the fully-structured initializer list used for
+  /// semantic analysis and code generation.
+ InitListExpr *getFullyStructuredList() const { return FullyStructuredList; }
+};
+} // end anonymous namespace
+
+/// Recursively replaces NULL values within the given initializer list
+/// with expressions that perform value-initialization of the
+/// appropriate type.
+void InitListChecker::FillInValueInitializations(InitListExpr *ILE) {
+ assert((ILE->getType() != SemaRef.Context.VoidTy) &&
+ "Should not have void type");
+ SourceLocation Loc = ILE->getSourceRange().getBegin();
+ if (ILE->getSyntacticForm())
+ Loc = ILE->getSyntacticForm()->getSourceRange().getBegin();
+
+ if (const RecordType *RType = ILE->getType()->getAsRecordType()) {
+ unsigned Init = 0, NumInits = ILE->getNumInits();
+ for (RecordDecl::field_iterator
+ Field = RType->getDecl()->field_begin(SemaRef.Context),
+ FieldEnd = RType->getDecl()->field_end(SemaRef.Context);
+ Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (Init >= NumInits || !ILE->getInit(Init)) {
+ if (Field->getType()->isReferenceType()) {
+ // C++ [dcl.init.aggr]p9:
+ // If an incomplete or empty initializer-list leaves a
+ // member of reference type uninitialized, the program is
+ // ill-formed.
+ SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized)
+ << Field->getType()
+ << ILE->getSyntacticForm()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(),
+ diag::note_uninit_reference_member);
+ hadError = true;
+ return;
+ } else if (SemaRef.CheckValueInitialization(Field->getType(), Loc)) {
+ hadError = true;
+ return;
+ }
+
+ // FIXME: If value-initialization involves calling a constructor, should
+ // we make that call explicit in the representation (even when it means
+ // extending the initializer list)?
+ if (Init < NumInits && !hadError)
+ ILE->setInit(Init,
+ new (SemaRef.Context) ImplicitValueInitExpr(Field->getType()));
+ } else if (InitListExpr *InnerILE
+ = dyn_cast<InitListExpr>(ILE->getInit(Init)))
+ FillInValueInitializations(InnerILE);
+ ++Init;
+
+ // Only look at the first initialization of a union.
+ if (RType->getDecl()->isUnion())
+ break;
+ }
+
+ return;
+ }
+
+ QualType ElementType;
+
+ unsigned NumInits = ILE->getNumInits();
+ unsigned NumElements = NumInits;
+ if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
+ ElementType = AType->getElementType();
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType))
+ NumElements = CAType->getSize().getZExtValue();
+ } else if (const VectorType *VType = ILE->getType()->getAsVectorType()) {
+ ElementType = VType->getElementType();
+ NumElements = VType->getNumElements();
+ } else
+ ElementType = ILE->getType();
+
+ for (unsigned Init = 0; Init != NumElements; ++Init) {
+ if (Init >= NumInits || !ILE->getInit(Init)) {
+ if (SemaRef.CheckValueInitialization(ElementType, Loc)) {
+ hadError = true;
+ return;
+ }
+
+ // FIXME: If value-initialization involves calling a constructor, should
+ // we make that call explicit in the representation (even when it means
+ // extending the initializer list)?
+ if (Init < NumInits && !hadError)
+ ILE->setInit(Init,
+ new (SemaRef.Context) ImplicitValueInitExpr(ElementType));
+    } else if (InitListExpr *InnerILE
+                 = dyn_cast<InitListExpr>(ILE->getInit(Init)))
+ FillInValueInitializations(InnerILE);
+ }
+}
+
+InitListChecker::InitListChecker(Sema &S, InitListExpr *IL, QualType &T)
+ : SemaRef(S) {
+ hadError = false;
+
+ unsigned newIndex = 0;
+ unsigned newStructuredIndex = 0;
+ FullyStructuredList
+ = getStructuredSubobjectInit(IL, newIndex, T, 0, 0, IL->getSourceRange());
+ CheckExplicitInitList(IL, T, newIndex, FullyStructuredList, newStructuredIndex,
+ /*TopLevelObject=*/true);
+
+ if (!hadError)
+ FillInValueInitializations(FullyStructuredList);
+}
+
+int InitListChecker::numArrayElements(QualType DeclType) {
+ // FIXME: use a proper constant
+ int maxElements = 0x7FFFFFFF;
+ if (const ConstantArrayType *CAT =
+ SemaRef.Context.getAsConstantArrayType(DeclType)) {
+ maxElements = static_cast<int>(CAT->getSize().getZExtValue());
+ }
+ return maxElements;
+}
+
+int InitListChecker::numStructUnionElements(QualType DeclType) {
+ RecordDecl *structDecl = DeclType->getAsRecordType()->getDecl();
+ int InitializableMembers = 0;
+ for (RecordDecl::field_iterator
+ Field = structDecl->field_begin(SemaRef.Context),
+ FieldEnd = structDecl->field_end(SemaRef.Context);
+ Field != FieldEnd; ++Field) {
+ if ((*Field)->getIdentifier() || !(*Field)->isBitField())
+ ++InitializableMembers;
+ }
+ if (structDecl->isUnion())
+ return std::min(InitializableMembers, 1);
+ return InitializableMembers - structDecl->hasFlexibleArrayMember();
+}
+
+void InitListChecker::CheckImplicitInitList(InitListExpr *ParentIList,
+ QualType T, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ int maxElements = 0;
+
+ if (T->isArrayType())
+ maxElements = numArrayElements(T);
+ else if (T->isStructureType() || T->isUnionType())
+ maxElements = numStructUnionElements(T);
+ else if (T->isVectorType())
+ maxElements = T->getAsVectorType()->getNumElements();
+ else
+ assert(0 && "CheckImplicitInitList(): Illegal type");
+
+ if (maxElements == 0) {
+ SemaRef.Diag(ParentIList->getInit(Index)->getLocStart(),
+ diag::err_implicit_empty_initializer);
+ ++Index;
+ hadError = true;
+ return;
+ }
+
+ // Build a structured initializer list corresponding to this subobject.
+ InitListExpr *StructuredSubobjectInitList
+ = getStructuredSubobjectInit(ParentIList, Index, T, StructuredList,
+ StructuredIndex,
+ SourceRange(ParentIList->getInit(Index)->getSourceRange().getBegin(),
+ ParentIList->getSourceRange().getEnd()));
+ unsigned StructuredSubobjectInitIndex = 0;
+
+ // Check the element types and build the structural subobject.
+ unsigned StartIndex = Index;
+ CheckListElementTypes(ParentIList, T, false, Index,
+ StructuredSubobjectInitList,
+ StructuredSubobjectInitIndex,
+ TopLevelObject);
+ unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1);
+ StructuredSubobjectInitList->setType(T);
+
+  // Update the structured sub-object initializer so that its ending
+  // range corresponds with the end of the last initializer it used.
+ if (EndIndex < ParentIList->getNumInits()) {
+ SourceLocation EndLoc
+ = ParentIList->getInit(EndIndex)->getSourceRange().getEnd();
+ StructuredSubobjectInitList->setRBraceLoc(EndLoc);
+ }
+}
+
+void InitListChecker::CheckExplicitInitList(InitListExpr *IList, QualType &T,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ assert(IList->isExplicit() && "Illegal Implicit InitListExpr");
+ SyntacticToSemantic[IList] = StructuredList;
+ StructuredList->setSyntacticForm(IList);
+ CheckListElementTypes(IList, T, true, Index, StructuredList,
+ StructuredIndex, TopLevelObject);
+ IList->setType(T);
+ StructuredList->setType(T);
+ if (hadError)
+ return;
+
+ if (Index < IList->getNumInits()) {
+ // We have leftover initializers
+ if (StructuredIndex == 1 &&
+ IsStringInit(StructuredList->getInit(0), T, SemaRef.Context)) {
+ unsigned DK = diag::warn_excess_initializers_in_char_array_initializer;
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ DK = diag::err_excess_initializers_in_char_array_initializer;
+ hadError = true;
+ }
+      // Special-case the diagnostic for excess initializers in a character
+      // array initialized by a string literal.
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << IList->getInit(Index)->getSourceRange();
+ } else if (!T->isIncompleteType()) {
+ // Don't complain for incomplete types, since we'll get an error
+ // elsewhere
+ QualType CurrentObjectType = StructuredList->getType();
+ int initKind =
+ CurrentObjectType->isArrayType()? 0 :
+ CurrentObjectType->isVectorType()? 1 :
+ CurrentObjectType->isScalarType()? 2 :
+ CurrentObjectType->isUnionType()? 3 :
+ 4;
+
+ unsigned DK = diag::warn_excess_initializers;
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ DK = diag::err_excess_initializers;
+ hadError = true;
+ }
+
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << initKind << IList->getInit(Index)->getSourceRange();
+ }
+ }
+
+ if (T->isScalarType() && !TopLevelObject)
+ SemaRef.Diag(IList->getLocStart(), diag::warn_braces_around_scalar_init)
+ << IList->getSourceRange()
+ << CodeModificationHint::CreateRemoval(SourceRange(IList->getLocStart()))
+ << CodeModificationHint::CreateRemoval(SourceRange(IList->getLocEnd()));
+}
+
+void InitListChecker::CheckListElementTypes(InitListExpr *IList,
+ QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ if (DeclType->isScalarType()) {
+ CheckScalarType(IList, DeclType, Index, StructuredList, StructuredIndex);
+ } else if (DeclType->isVectorType()) {
+ CheckVectorType(IList, DeclType, Index, StructuredList, StructuredIndex);
+ } else if (DeclType->isAggregateType()) {
+ if (DeclType->isRecordType()) {
+ RecordDecl *RD = DeclType->getAsRecordType()->getDecl();
+ CheckStructUnionTypes(IList, DeclType, RD->field_begin(SemaRef.Context),
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex,
+ TopLevelObject);
+ } else if (DeclType->isArrayType()) {
+ llvm::APSInt Zero(
+ SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
+ false);
+ CheckArrayType(IList, DeclType, Zero, SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex);
+ }
+ else
+ assert(0 && "Aggregate that isn't a structure or array?!");
+ } else if (DeclType->isVoidType() || DeclType->isFunctionType()) {
+ // This type is invalid, issue a diagnostic.
+ ++Index;
+ SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
+ << DeclType;
+ hadError = true;
+ } else if (DeclType->isRecordType()) {
+ // C++ [dcl.init]p14:
+ // [...] If the class is an aggregate (8.5.1), and the initializer
+ // is a brace-enclosed list, see 8.5.1.
+ //
+ // Note: 8.5.1 is handled below; here, we diagnose the case where
+ // we have an initializer list and a destination type that is not
+ // an aggregate.
+ // FIXME: In C++0x, this is yet another form of initialization.
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
+ hadError = true;
+ } else if (DeclType->isReferenceType()) {
+ CheckReferenceType(IList, DeclType, Index, StructuredList, StructuredIndex);
+ } else {
+ // In C, all types are either scalars or aggregates, but
+ // additional handling is needed here for C++ (and possibly others?).
+ assert(0 && "Unsupported initializer type");
+ }
+}
+
+void InitListChecker::CheckSubElementType(InitListExpr *IList,
+ QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ Expr *expr = IList->getInit(Index);
+ if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) {
+ unsigned newIndex = 0;
+ unsigned newStructuredIndex = 0;
+ InitListExpr *newStructuredList
+ = getStructuredSubobjectInit(IList, Index, ElemType,
+ StructuredList, StructuredIndex,
+ SubInitList->getSourceRange());
+ CheckExplicitInitList(SubInitList, ElemType, newIndex,
+ newStructuredList, newStructuredIndex);
+ ++StructuredIndex;
+ ++Index;
+ } else if (Expr *Str = IsStringInit(expr, ElemType, SemaRef.Context)) {
+ CheckStringInit(Str, ElemType, SemaRef);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, Str);
+ ++Index;
+ } else if (ElemType->isScalarType()) {
+ CheckScalarType(IList, ElemType, Index, StructuredList, StructuredIndex);
+ } else if (ElemType->isReferenceType()) {
+ CheckReferenceType(IList, ElemType, Index, StructuredList, StructuredIndex);
+ } else {
+ if (SemaRef.getLangOptions().CPlusPlus) {
+ // C++ [dcl.init.aggr]p12:
+ // All implicit type conversions (clause 4) are considered when
+      // initializing the aggregate member with an initializer from
+ // an initializer-list. If the initializer can initialize a
+ // member, the member is initialized. [...]
+ ImplicitConversionSequence ICS
+ = SemaRef.TryCopyInitialization(expr, ElemType);
+ if (ICS.ConversionKind != ImplicitConversionSequence::BadConversion) {
+ if (SemaRef.PerformImplicitConversion(expr, ElemType, ICS,
+ "initializing"))
+ hadError = true;
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization
+ } else {
+ // C99 6.7.8p13:
+ //
+ // The initializer for a structure or union object that has
+ // automatic storage duration shall be either an initializer
+ // list as described below, or a single expression that has
+ // compatible structure or union type. In the latter case, the
+ // initial value of the object, including unnamed members, is
+ // that of the expression.
+ if (ElemType->isRecordType() &&
+ SemaRef.Context.hasSameUnqualifiedType(expr->getType(), ElemType)) {
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization
+ }
+
+ // C++ [dcl.init.aggr]p12:
+ //
+ // [...] Otherwise, if the member is itself a non-empty
+ // subaggregate, brace elision is assumed and the initializer is
+ // considered for the initialization of the first member of
+ // the subaggregate.
+ if (ElemType->isAggregateType() || ElemType->isVectorType()) {
+ CheckImplicitInitList(IList, ElemType, Index, StructuredList,
+ StructuredIndex);
+ ++StructuredIndex;
+ } else {
+ // We cannot initialize this element, so let
+ // PerformCopyInitialization produce the appropriate diagnostic.
+ SemaRef.PerformCopyInitialization(expr, ElemType, "initializing");
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ }
+ }
+}
+
+void InitListChecker::CheckScalarType(InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index < IList->getNumInits()) {
+ Expr *expr = IList->getInit(Index);
+ if (isa<InitListExpr>(expr)) {
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_many_braces_around_scalar_init)
+ << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ } else if (isa<DesignatedInitExpr>(expr)) {
+ SemaRef.Diag(expr->getSourceRange().getBegin(),
+ diag::err_designator_for_scalar_init)
+ << DeclType << expr->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ Expr *savExpr = expr; // Might be promoted by CheckSingleInitializer.
+ if (CheckSingleInitializer(expr, DeclType, false, SemaRef))
+ hadError = true; // types weren't compatible.
+ else if (savExpr != expr) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, expr);
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+ } else {
+ SemaRef.Diag(IList->getLocStart(), diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+}
+
+void InitListChecker::CheckReferenceType(InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index < IList->getNumInits()) {
+ Expr *expr = IList->getInit(Index);
+ if (isa<InitListExpr>(expr)) {
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+    Expr *savExpr = expr; // Might be modified by CheckReferenceInit.
+ if (SemaRef.CheckReferenceInit(expr, DeclType))
+ hadError = true;
+ else if (savExpr != expr) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, expr);
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+ } else {
+ // FIXME: It would be wonderful if we could point at the actual member. In
+ // general, it would be useful to pass location information down the stack,
+ // so that we know the location (or decl) of the "current object" being
+ // initialized.
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_init_reference_member_uninitialized)
+ << DeclType
+ << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+}
+
+void InitListChecker::CheckVectorType(InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index < IList->getNumInits()) {
+ const VectorType *VT = DeclType->getAsVectorType();
+ int maxElements = VT->getNumElements();
+ QualType elementType = VT->getElementType();
+
+ for (int i = 0; i < maxElements; ++i) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits())
+ break;
+ CheckSubElementType(IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+ }
+}
+
+void InitListChecker::CheckArrayType(InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ // Check for the special-case of initializing an array with a string.
+ if (Index < IList->getNumInits()) {
+ if (Expr *Str = IsStringInit(IList->getInit(Index), DeclType,
+ SemaRef.Context)) {
+ CheckStringInit(Str, DeclType, SemaRef);
+ // We place the string literal directly into the resulting
+ // initializer list. This is the only place where the structure
+ // of the structured initializer list doesn't match exactly,
+ // because doing so would involve allocating one character
+ // constant for each string.
+ UpdateStructuredListElement(StructuredList, StructuredIndex, Str);
+ StructuredList->resizeInits(SemaRef.Context, StructuredIndex);
+ ++Index;
+ return;
+ }
+ }
+ if (const VariableArrayType *VAT =
+ SemaRef.Context.getAsVariableArrayType(DeclType)) {
+ // Check for VLAs; in standard C it would be possible to check this
+ // earlier, but I don't know where clang accepts VLAs (gcc accepts
+ // them in all sorts of strange places).
+ SemaRef.Diag(VAT->getSizeExpr()->getLocStart(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ // We might know the maximum number of elements in advance.
+ llvm::APSInt maxElements(elementIndex.getBitWidth(),
+ elementIndex.isUnsigned());
+ bool maxElementsKnown = false;
+ if (const ConstantArrayType *CAT =
+ SemaRef.Context.getAsConstantArrayType(DeclType)) {
+ maxElements = CAT->getSize();
+ elementIndex.extOrTrunc(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+ maxElementsKnown = true;
+ }
+
+ QualType elementType = SemaRef.Context.getAsArrayType(DeclType)
+ ->getElementType();
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. elementIndex will be
+ // updated to be the next array element we'll initialize.
+ if (CheckDesignatedInitializer(IList, DIE, 0,
+ DeclType, 0, &elementIndex, Index,
+ StructuredList, StructuredIndex, true,
+ false)) {
+ hadError = true;
+ continue;
+ }
+
+ if (elementIndex.getBitWidth() > maxElements.getBitWidth())
+ maxElements.extend(elementIndex.getBitWidth());
+ else if (elementIndex.getBitWidth() < maxElements.getBitWidth())
+ elementIndex.extend(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+
+ continue;
+ }
+
+ // If we know the maximum number of elements, and we've already
+ // hit it, stop consuming elements in the initializer list.
+ if (maxElementsKnown && elementIndex == maxElements)
+ break;
+
+ // Check this element.
+ CheckSubElementType(IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++elementIndex;
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+ }
+ if (!hadError && DeclType->isIncompleteArrayType()) {
+ // If this is an incomplete array type, the actual type needs to
+ // be calculated here.
+ llvm::APSInt Zero(maxElements.getBitWidth(), maxElements.isUnsigned());
+ if (maxElements == Zero) {
+ // Sizing an array implicitly to zero is not allowed by ISO C,
+ // but is supported by GNU.
+ SemaRef.Diag(IList->getLocStart(),
+ diag::ext_typecheck_zero_array_size);
+ }
+
+ DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements,
+ ArrayType::Normal, 0);
+ }
+}
+
+void InitListChecker::CheckStructUnionTypes(InitListExpr *IList,
+ QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ RecordDecl* structDecl = DeclType->getAsRecordType()->getDecl();
+
+  // If the record is invalid, some of its members are invalid. To avoid
+  // confusion, we forgo checking the initializer for the entire record.
+ if (structDecl->isInvalidDecl()) {
+ hadError = true;
+ return;
+ }
+
+ if (DeclType->isUnionType() && IList->getNumInits() == 0) {
+ // Value-initialize the first named member of the union.
+ RecordDecl *RD = DeclType->getAsRecordType()->getDecl();
+ for (RecordDecl::field_iterator FieldEnd = RD->field_end(SemaRef.Context);
+ Field != FieldEnd; ++Field) {
+ if (Field->getDeclName()) {
+ StructuredList->setInitializedFieldInUnion(*Field);
+ break;
+ }
+ }
+ return;
+ }
+
+ // If structDecl is a forward declaration, this loop won't do
+  // anything except look at designated initializers; that's okay,
+ // because an error should get printed out elsewhere. It might be
+ // worthwhile to skip over the rest of the initializer, though.
+ RecordDecl *RD = DeclType->getAsRecordType()->getDecl();
+ RecordDecl::field_iterator FieldEnd = RD->field_end(SemaRef.Context);
+ bool InitializedSomething = false;
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. Field will be updated to
+ // the next field that we'll be initializing.
+ if (CheckDesignatedInitializer(IList, DIE, 0,
+ DeclType, &Field, 0, Index,
+ StructuredList, StructuredIndex,
+ true, TopLevelObject))
+ hadError = true;
+
+ InitializedSomething = true;
+ continue;
+ }
+
+ if (Field == FieldEnd) {
+ // We've run out of fields. We're done.
+ break;
+ }
+
+ // We've already initialized a member of a union. We're done.
+ if (InitializedSomething && DeclType->isUnionType())
+ break;
+
+ // If we've hit the flexible array member at the end, we're done.
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield()) {
+ // Don't initialize unnamed bitfields, e.g. "int : 20;"
+ ++Field;
+ continue;
+ }
+
+ CheckSubElementType(IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+ InitializedSomething = true;
+
+ if (DeclType->isUnionType()) {
+ // Initialize the first field within the union.
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+
+ ++Field;
+ }
+
+ if (Field == FieldEnd || !Field->getType()->isIncompleteArrayType() ||
+ Index >= IList->getNumInits())
+ return;
+
+ // Handle GNU flexible array initializers.
+ if (!TopLevelObject &&
+ (!isa<InitListExpr>(IList->getInit(Index)) ||
+ cast<InitListExpr>(IList->getInit(Index))->getNumInits() > 0)) {
+ SemaRef.Diag(IList->getInit(Index)->getSourceRange().getBegin(),
+ diag::err_flexible_array_init_nonempty)
+ << IList->getInit(Index)->getSourceRange().getBegin();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ hadError = true;
+ ++Index;
+ return;
+ } else {
+ SemaRef.Diag(IList->getInit(Index)->getSourceRange().getBegin(),
+ diag::ext_flexible_array_init)
+ << IList->getInit(Index)->getSourceRange().getBegin();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ }
+
+ if (isa<InitListExpr>(IList->getInit(Index)))
+ CheckSubElementType(IList, Field->getType(), Index, StructuredList,
+ StructuredIndex);
+ else
+ CheckImplicitInitList(IList, Field->getType(), Index, StructuredList,
+ StructuredIndex);
+}
+
+/// \brief Expand a field designator that refers to a member of an
+/// anonymous struct or union into a series of field designators that
+/// refer to the field within the appropriate subobject.
+///
+/// Field/FieldIndex will be updated to point to the (new)
+/// currently-designated field.
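+///
+/// For example (illustrative only), given
+///
+///   struct S { struct { int a; int b; }; int c; };
+///   struct S s = { .b = 1 };
+///
+/// the single designator .b is expanded into a pair of designators:
+/// one naming the unnamed anonymous-struct member of S, followed by
+/// one naming its field b.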
+static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ FieldDecl *Field,
+ RecordDecl::field_iterator &FieldIter,
+ unsigned &FieldIndex) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ // Build the path from the current object to the member of the
+ // anonymous struct/union (backwards).
+ llvm::SmallVector<FieldDecl *, 4> Path;
+ SemaRef.BuildAnonymousStructUnionMemberPath(Field, Path);
+
+ // Build the replacement designators.
+ llvm::SmallVector<Designator, 4> Replacements;
+ for (llvm::SmallVector<FieldDecl *, 4>::reverse_iterator
+ FI = Path.rbegin(), FIEnd = Path.rend();
+ FI != FIEnd; ++FI) {
+ if (FI + 1 == FIEnd)
+ Replacements.push_back(Designator((IdentifierInfo *)0,
+ DIE->getDesignator(DesigIdx)->getDotLoc(),
+ DIE->getDesignator(DesigIdx)->getFieldLoc()));
+ else
+ Replacements.push_back(Designator((IdentifierInfo *)0, SourceLocation(),
+ SourceLocation()));
+ Replacements.back().setField(*FI);
+ }
+
+ // Expand the current designator into the set of replacement
+ // designators, so we have a full subobject path down to where the
+ // member of the anonymous struct/union is actually stored.
+ DIE->ExpandDesignator(DesigIdx, &Replacements[0],
+ &Replacements[0] + Replacements.size());
+
+ // Update FieldIter/FieldIndex;
+ RecordDecl *Record = cast<RecordDecl>(Path.back()->getDeclContext());
+ FieldIter = Record->field_begin(SemaRef.Context);
+ FieldIndex = 0;
+ for (RecordDecl::field_iterator FEnd = Record->field_end(SemaRef.Context);
+ FieldIter != FEnd; ++FieldIter) {
+ if (FieldIter->isUnnamedBitfield())
+ continue;
+
+ if (*FieldIter == Path.back())
+ return;
+
+ ++FieldIndex;
+ }
+
+ assert(false && "Unable to find anonymous struct/union field");
+}
+
+/// @brief Check the well-formedness of a C99 designated initializer.
+///
+/// Determines whether the designated initializer @p DIE, which
+/// resides at the given @p Index within the initializer list @p
+/// IList, is well-formed for a current object of type @p DeclType
+/// (C99 6.7.8). The actual subobject that this designator refers to
+/// within the current subobject is returned in either
+/// @p NextField or @p NextElementIndex (whichever is appropriate).
+///
+/// @param IList The initializer list in which this designated
+/// initializer occurs.
+///
+/// @param DIE The designated initializer expression.
+///
+/// @param DesigIdx The index of the current designator.
+///
+/// @param DeclType The type of the "current object" (C99 6.7.8p17),
+/// into which the designation in @p DIE should refer.
+///
+/// @param NextField If non-NULL and the first designator in @p DIE is
+/// a field, this will be set to the field declaration corresponding
+/// to the field named by the designator.
+///
+/// @param NextElementIndex If non-NULL and the first designator in @p
+/// DIE is an array designator or GNU array-range designator, this
+/// will be set to the last index initialized by this designator.
+///
+/// @param Index Index into @p IList where the designated initializer
+/// @p DIE occurs.
+///
+/// @param StructuredList The initializer list expression that
+/// describes all of the subobject initializers in the order they'll
+/// actually be initialized.
+///
+/// @returns true if there was an error, false otherwise.
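+///
+/// For example (illustrative only), given the GNU array-range
+/// extension
+///
+///   int a[6] = { [1 ... 4] = 7 };
+///
+/// the designator covers elements 1 through 4 inclusive; the range is
+/// walked one element at a time, and each of a[1] through a[4] is
+/// initialized to 7.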
+bool
+InitListChecker::CheckDesignatedInitializer(InitListExpr *IList,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject) {
+ if (DesigIdx == DIE->size()) {
+ // Check the actual initialization for the designated object type.
+ bool prevHadError = hadError;
+
+ // Temporarily remove the designator expression from the
+ // initializer list that the child calls see, so that we don't try
+ // to re-process the designator.
+ unsigned OldIndex = Index;
+ IList->setInit(OldIndex, DIE->getInit());
+
+ CheckSubElementType(IList, CurrentObjectType, Index,
+ StructuredList, StructuredIndex);
+
+ // Restore the designated initializer expression in the syntactic
+ // form of the initializer list.
+ if (IList->getInit(OldIndex) != DIE->getInit())
+ DIE->setInit(IList->getInit(OldIndex));
+ IList->setInit(OldIndex, DIE);
+
+ return hadError && !prevHadError;
+ }
+
+ bool IsFirstDesignator = (DesigIdx == 0);
+ assert((IsFirstDesignator || StructuredList) &&
+ "Need a non-designated initializer list to start from");
+
+ DesignatedInitExpr::Designator *D = DIE->getDesignator(DesigIdx);
+ // Determine the structural initializer list that corresponds to the
+ // current subobject.
+ StructuredList = IsFirstDesignator? SyntacticToSemantic[IList]
+ : getStructuredSubobjectInit(IList, Index, CurrentObjectType,
+ StructuredList, StructuredIndex,
+ SourceRange(D->getStartLocation(),
+ DIE->getSourceRange().getEnd()));
+ assert(StructuredList && "Expected a structured initializer list");
+
+ if (D->isFieldDesignator()) {
+ // C99 6.7.8p7:
+ //
+ // If a designator has the form
+ //
+ // . identifier
+ //
+ // then the current object (defined below) shall have
+ // structure or union type and the identifier shall be the
+ // name of a member of that type.
+ const RecordType *RT = CurrentObjectType->getAsRecordType();
+ if (!RT) {
+ SourceLocation Loc = D->getDotLoc();
+ if (Loc.isInvalid())
+ Loc = D->getFieldLoc();
+ SemaRef.Diag(Loc, diag::err_field_designator_non_aggr)
+ << SemaRef.getLangOptions().CPlusPlus << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ // Note: we perform a linear search of the fields here, despite
+ // the fact that we have a faster lookup method, because we always
+ // need to compute the field's index.
+ FieldDecl *KnownField = D->getField();
+ IdentifierInfo *FieldName = D->getFieldName();
+ unsigned FieldIndex = 0;
+ RecordDecl::field_iterator
+ Field = RT->getDecl()->field_begin(SemaRef.Context),
+ FieldEnd = RT->getDecl()->field_end(SemaRef.Context);
+ for (; Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (KnownField == *Field || Field->getIdentifier() == FieldName)
+ break;
+
+ ++FieldIndex;
+ }
+
+ if (Field == FieldEnd) {
+ // There was no normal field in the struct with the designated
+ // name. Perform another lookup for this name, which may find
+ // something that we can't designate (e.g., a member function),
+ // may find nothing, or may find a member of an anonymous
+ // struct/union.
+ DeclContext::lookup_result Lookup
+ = RT->getDecl()->lookup(SemaRef.Context, FieldName);
+ if (Lookup.first == Lookup.second) {
+ // Name lookup didn't find anything.
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
+ << FieldName << CurrentObjectType;
+ ++Index;
+ return true;
+ } else if (!KnownField && isa<FieldDecl>(*Lookup.first) &&
+ cast<RecordDecl>((*Lookup.first)->getDeclContext())
+ ->isAnonymousStructOrUnion()) {
+        // Handle a field designator that refers to a member of an
+ // anonymous struct or union.
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx,
+ cast<FieldDecl>(*Lookup.first),
+ Field, FieldIndex);
+ D = DIE->getDesignator(DesigIdx);
+ } else {
+ // Name lookup found something, but it wasn't a field.
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
+ << FieldName;
+ SemaRef.Diag((*Lookup.first)->getLocation(),
+ diag::note_field_designator_found);
+ ++Index;
+ return true;
+ }
+ } else if (!KnownField &&
+ cast<RecordDecl>((*Field)->getDeclContext())
+ ->isAnonymousStructOrUnion()) {
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, *Field,
+ Field, FieldIndex);
+ D = DIE->getDesignator(DesigIdx);
+ }
+
+ // All of the fields of a union are located at the same place in
+ // the initializer list.
+ if (RT->getDecl()->isUnion()) {
+ FieldIndex = 0;
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+
+ // Update the designator with the field declaration.
+ D->setField(*Field);
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this field.
+ if (FieldIndex >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context, FieldIndex + 1);
+
+ // This designator names a flexible array member.
+ if (Field->getType()->isIncompleteArrayType()) {
+ bool Invalid = false;
+ if ((DesigIdx + 1) != DIE->size()) {
+ // We can't designate an object within the flexible array
+ // member (because GCC doesn't allow it).
+ DesignatedInitExpr::Designator *NextD
+ = DIE->getDesignator(DesigIdx + 1);
+ SemaRef.Diag(NextD->getStartLocation(),
+ diag::err_designator_into_flexible_array_member)
+ << SourceRange(NextD->getStartLocation(),
+ DIE->getSourceRange().getEnd());
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ Invalid = true;
+ }
+
+ if (!hadError && !isa<InitListExpr>(DIE->getInit())) {
+ // The initializer is not an initializer list.
+ SemaRef.Diag(DIE->getInit()->getSourceRange().getBegin(),
+ diag::err_flexible_array_init_needs_braces)
+ << DIE->getInit()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ Invalid = true;
+ }
+
+ // Handle GNU flexible array initializers.
+ if (!Invalid && !TopLevelObject &&
+ cast<InitListExpr>(DIE->getInit())->getNumInits() > 0) {
+ SemaRef.Diag(DIE->getSourceRange().getBegin(),
+ diag::err_flexible_array_init_nonempty)
+ << DIE->getSourceRange().getBegin();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ Invalid = true;
+ }
+
+ if (Invalid) {
+ ++Index;
+ return true;
+ }
+
+ // Initialize the array.
+ bool prevHadError = hadError;
+ unsigned newStructuredIndex = FieldIndex;
+ unsigned OldIndex = Index;
+ IList->setInit(Index, DIE->getInit());
+ CheckSubElementType(IList, Field->getType(), Index,
+ StructuredList, newStructuredIndex);
+ IList->setInit(OldIndex, DIE);
+ if (hadError && !prevHadError) {
+ ++Field;
+ ++FieldIndex;
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return true;
+ }
+ } else {
+ // Recurse to check later designated subobjects.
+ QualType FieldType = (*Field)->getType();
+ unsigned newStructuredIndex = FieldIndex;
+ if (CheckDesignatedInitializer(IList, DIE, DesigIdx + 1, FieldType, 0, 0,
+ Index, StructuredList, newStructuredIndex,
+ true, false))
+ return true;
+ }
+
+ // Find the position of the next field to be initialized in this
+ // subobject.
+ ++Field;
+ ++FieldIndex;
+
+    // If this is the first designator, our caller will continue checking
+ // the rest of this struct/class/union subobject.
+ if (IsFirstDesignator) {
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // We've already initialized something in the union; we're done.
+ if (RT->getDecl()->isUnion())
+ return hadError;
+
+ // Check the remaining fields within this class/struct/union subobject.
+ bool prevHadError = hadError;
+ CheckStructUnionTypes(IList, CurrentObjectType, Field, false, Index,
+ StructuredList, FieldIndex);
+ return hadError && !prevHadError;
+ }
+
+ // C99 6.7.8p6:
+ //
+ // If a designator has the form
+ //
+ // [ constant-expression ]
+ //
+ // then the current object (defined below) shall have array
+ // type and the expression shall be an integer constant
+ // expression. If the array is of unknown size, any
+ // nonnegative value is valid.
+ //
+ // Additionally, cope with the GNU extension that permits
+ // designators of the form
+ //
+ // [ constant-expression ... constant-expression ]
+ const ArrayType *AT = SemaRef.Context.getAsArrayType(CurrentObjectType);
+ if (!AT) {
+ SemaRef.Diag(D->getLBracketLoc(), diag::err_array_designator_non_array)
+ << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ Expr *IndexExpr = 0;
+ llvm::APSInt DesignatedStartIndex, DesignatedEndIndex;
+ if (D->isArrayDesignator()) {
+ IndexExpr = DIE->getArrayIndex(*D);
+ DesignatedStartIndex = IndexExpr->EvaluateAsInt(SemaRef.Context);
+ DesignatedEndIndex = DesignatedStartIndex;
+ } else {
+ assert(D->isArrayRangeDesignator() && "Need array-range designator");
+
+ DesignatedStartIndex =
+ DIE->getArrayRangeStart(*D)->EvaluateAsInt(SemaRef.Context);
+ DesignatedEndIndex =
+ DIE->getArrayRangeEnd(*D)->EvaluateAsInt(SemaRef.Context);
+ IndexExpr = DIE->getArrayRangeEnd(*D);
+
+    if (DesignatedStartIndex.getZExtValue() !=
+          DesignatedEndIndex.getZExtValue())
+ FullyStructuredList->sawArrayRangeDesignator();
+ }
+
+ if (isa<ConstantArrayType>(AT)) {
+ llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
+ DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
+ DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
+ if (DesignatedEndIndex >= MaxElements) {
+ SemaRef.Diag(IndexExpr->getSourceRange().getBegin(),
+ diag::err_array_designator_too_large)
+ << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
+ << IndexExpr->getSourceRange();
+ ++Index;
+ return true;
+ }
+ } else {
+ // Make sure the bit-widths and signedness match.
+ if (DesignatedStartIndex.getBitWidth() > DesignatedEndIndex.getBitWidth())
+ DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
+ else if (DesignatedStartIndex.getBitWidth() <
+ DesignatedEndIndex.getBitWidth())
+ DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
+ DesignatedStartIndex.setIsUnsigned(true);
+ DesignatedEndIndex.setIsUnsigned(true);
+ }
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this array element.
+ if (DesignatedEndIndex.getZExtValue() >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context,
+ DesignatedEndIndex.getZExtValue() + 1);
+
+ // Repeatedly perform subobject initializations in the range
+ // [DesignatedStartIndex, DesignatedEndIndex].
+
+ // Move to the next designator
+ unsigned ElementIndex = DesignatedStartIndex.getZExtValue();
+ unsigned OldIndex = Index;
+ while (DesignatedStartIndex <= DesignatedEndIndex) {
+ // Recurse to check later designated subobjects.
+ QualType ElementType = AT->getElementType();
+ Index = OldIndex;
+ if (CheckDesignatedInitializer(IList, DIE, DesigIdx + 1, ElementType, 0, 0,
+ Index, StructuredList, ElementIndex,
+ (DesignatedStartIndex == DesignatedEndIndex),
+ false))
+ return true;
+
+ // Move to the next index in the array that we'll be initializing.
+ ++DesignatedStartIndex;
+ ElementIndex = DesignatedStartIndex.getZExtValue();
+ }
+
+  // If this is the first designator, our caller will continue checking
+ // the rest of this array subobject.
+ if (IsFirstDesignator) {
+ if (NextElementIndex)
+ *NextElementIndex = DesignatedStartIndex;
+ StructuredIndex = ElementIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // Check the remaining elements within this array subobject.
+ bool prevHadError = hadError;
+ CheckArrayType(IList, CurrentObjectType, DesignatedStartIndex, false, Index,
+ StructuredList, ElementIndex);
+ return hadError && !prevHadError;
+}
+
+/// Get the structured initializer list for a subobject of type
+/// @p CurrentObjectType.
+InitListExpr *
+InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange) {
+ Expr *ExistingInit = 0;
+ if (!StructuredList)
+ ExistingInit = SyntacticToSemantic[IList];
+ else if (StructuredIndex < StructuredList->getNumInits())
+ ExistingInit = StructuredList->getInit(StructuredIndex);
+
+ if (InitListExpr *Result = dyn_cast_or_null<InitListExpr>(ExistingInit))
+ return Result;
+
+ if (ExistingInit) {
+ // We are creating an initializer list that initializes the
+ // subobjects of the current object, but there was already an
+ // initialization that completely initialized the current
+ // subobject, e.g., by a compound literal:
+ //
+ // struct X { int a, b; };
+ // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 };
+ //
+ // Here, xs[0].a == 0 and xs[0].b == 3, since the second,
+ // designated initializer re-initializes the whole
+ // subobject [0], overwriting previous initializers.
+ SemaRef.Diag(InitRange.getBegin(),
+ diag::warn_subobject_initializer_overrides)
+ << InitRange;
+ SemaRef.Diag(ExistingInit->getSourceRange().getBegin(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
+ }
+
+ InitListExpr *Result
+ = new (SemaRef.Context) InitListExpr(InitRange.getBegin(), 0, 0,
+ InitRange.getEnd());
+
+ Result->setType(CurrentObjectType);
+
+ // Pre-allocate storage for the structured initializer list.
+ unsigned NumElements = 0;
+ unsigned NumInits = 0;
+ if (!StructuredList)
+ NumInits = IList->getNumInits();
+ else if (Index < IList->getNumInits()) {
+ if (InitListExpr *SubList = dyn_cast<InitListExpr>(IList->getInit(Index)))
+ NumInits = SubList->getNumInits();
+ }
+
+ if (const ArrayType *AType
+ = SemaRef.Context.getAsArrayType(CurrentObjectType)) {
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
+ NumElements = CAType->getSize().getZExtValue();
+ // Simple heuristic so that we don't allocate a very large
+ // initializer with many empty entries at the end.
+ if (NumInits && NumElements > NumInits)
+ NumElements = 0;
+ }
+ } else if (const VectorType *VType = CurrentObjectType->getAsVectorType())
+ NumElements = VType->getNumElements();
+ else if (const RecordType *RType = CurrentObjectType->getAsRecordType()) {
+ RecordDecl *RDecl = RType->getDecl();
+ if (RDecl->isUnion())
+ NumElements = 1;
+ else
+ NumElements = std::distance(RDecl->field_begin(SemaRef.Context),
+ RDecl->field_end(SemaRef.Context));
+ }
+
+ if (NumElements < NumInits)
+ NumElements = IList->getNumInits();
+
+ Result->reserveInits(NumElements);
+
+ // Link this new initializer list into the structured initializer
+ // lists.
+ if (StructuredList)
+ StructuredList->updateInit(StructuredIndex, Result);
+ else {
+ Result->setSyntacticForm(IList);
+ SyntacticToSemantic[IList] = Result;
+ }
+
+ return Result;
+}
+
+/// Update the initializer at index @p StructuredIndex within the
+/// structured initializer list to the value @p expr.
+void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr) {
+ // No structured initializer list to update
+ if (!StructuredList)
+ return;
+
+ if (Expr *PrevInit = StructuredList->updateInit(StructuredIndex, expr)) {
+ // This initializer overwrites a previous initializer. Warn.
+ SemaRef.Diag(expr->getSourceRange().getBegin(),
+ diag::warn_initializer_overrides)
+ << expr->getSourceRange();
+ SemaRef.Diag(PrevInit->getSourceRange().getBegin(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << PrevInit->getSourceRange();
+ }
+
+ ++StructuredIndex;
+}
+
+/// Check that the given Index expression is a valid array designator
+/// value. This is essentially just a wrapper around
+/// VerifyIntegerConstantExpression that also checks for negative values
+/// and produces a reasonable diagnostic if there is a
+/// failure. Returns true if there was an error, false otherwise. If
+/// everything went okay, Value will receive the value of the constant
+/// expression.
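+///
+/// For example (illustrative only), in
+///
+///   int a[] = { [3] = 1, [-1] = 2 };
+///
+/// the index 3 is accepted as a non-negative integer constant
+/// expression, while the negative index -1 is rejected with a
+/// diagnostic.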
+static bool
+CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
+ SourceLocation Loc = Index->getSourceRange().getBegin();
+
+ // Make sure this is an integer constant expression.
+ if (S.VerifyIntegerConstantExpression(Index, &Value))
+ return true;
+
+ if (Value.isSigned() && Value.isNegative())
+ return S.Diag(Loc, diag::err_array_designator_negative)
+ << Value.toString(10) << Index->getSourceRange();
+
+ Value.setIsUnsigned(true);
+ return false;
+}
+
+Sema::OwningExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
+ SourceLocation Loc,
+ bool GNUSyntax,
+ OwningExprResult Init) {
+ typedef DesignatedInitExpr::Designator ASTDesignator;
+
+ bool Invalid = false;
+ llvm::SmallVector<ASTDesignator, 32> Designators;
+ llvm::SmallVector<Expr *, 32> InitExpressions;
+
+ // Build designators and check array designator expressions.
+ for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
+ const Designator &D = Desig.getDesignator(Idx);
+ switch (D.getKind()) {
+ case Designator::FieldDesignator:
+ Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
+ D.getFieldLoc()));
+ break;
+
+ case Designator::ArrayDesignator: {
+ Expr *Index = static_cast<Expr *>(D.getArrayIndex());
+ llvm::APSInt IndexValue;
+ if (!Index->isTypeDependent() &&
+ !Index->isValueDependent() &&
+ CheckArrayDesignatorExpr(*this, Index, IndexValue))
+ Invalid = true;
+ else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(Index);
+ }
+ break;
+ }
+
+ case Designator::ArrayRangeDesignator: {
+ Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
+ Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
+ llvm::APSInt StartValue;
+ llvm::APSInt EndValue;
+ bool StartDependent = StartIndex->isTypeDependent() ||
+ StartIndex->isValueDependent();
+ bool EndDependent = EndIndex->isTypeDependent() ||
+ EndIndex->isValueDependent();
+ if ((!StartDependent &&
+ CheckArrayDesignatorExpr(*this, StartIndex, StartValue)) ||
+ (!EndDependent &&
+ CheckArrayDesignatorExpr(*this, EndIndex, EndValue)))
+ Invalid = true;
+ else {
+ // Make sure we're comparing values with the same bit width.
+ if (StartDependent || EndDependent) {
+ // Nothing to compute.
+ } else if (StartValue.getBitWidth() > EndValue.getBitWidth())
+ EndValue.extend(StartValue.getBitWidth());
+ else if (StartValue.getBitWidth() < EndValue.getBitWidth())
+ StartValue.extend(EndValue.getBitWidth());
+
+ if (!StartDependent && !EndDependent && EndValue < StartValue) {
+ Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
+ << StartValue.toString(10) << EndValue.toString(10)
+ << StartIndex->getSourceRange() << EndIndex->getSourceRange();
+ Invalid = true;
+ } else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getEllipsisLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(StartIndex);
+ InitExpressions.push_back(EndIndex);
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ if (Invalid || Init.isInvalid())
+ return ExprError();
+
+ // Clear out the expressions within the designation.
+ Desig.ClearExprs(*this);
+
+ DesignatedInitExpr *DIE
+ = DesignatedInitExpr::Create(Context,
+ Designators.data(), Designators.size(),
+ InitExpressions.data(), InitExpressions.size(),
+ Loc, GNUSyntax, Init.takeAs<Expr>());
+ return Owned(DIE);
+}
+
+bool Sema::CheckInitList(InitListExpr *&InitList, QualType &DeclType) {
+ InitListChecker CheckInitList(*this, InitList, DeclType);
+ if (!CheckInitList.HadError())
+ InitList = CheckInitList.getFullyStructuredList();
+
+ return CheckInitList.HadError();
+}
+
+/// \brief Diagnose any semantic errors with value-initialization of
+/// the given type.
+///
+/// Value-initialization effectively zero-initializes any types
+/// without user-declared constructors, and calls the default
+/// constructor for any type that has a user-declared
+/// constructor (C++ [dcl.init]p5). Value-initialization can fail when
+/// a type with a user-declared constructor does not have an
+/// accessible, non-deleted default constructor. In C, everything can
+/// be value-initialized, which corresponds to C's notion of
+/// initializing objects with static storage duration when no
+/// initializer is provided for that object.
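+///
+/// For example (illustrative only), value-initializing
+///
+///   struct X { X(); int n; };
+///
+/// calls the user-declared default constructor X(), whereas
+/// value-initializing a plain "int[4]" value-initializes (and thus
+/// zero-initializes) each of its elements.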
+///
+/// \returns true if there was an error, false otherwise.
+bool Sema::CheckValueInitialization(QualType Type, SourceLocation Loc) {
+ // C++ [dcl.init]p5:
+ //
+ // To value-initialize an object of type T means:
+
+ // -- if T is an array type, then each element is value-initialized;
+ if (const ArrayType *AT = Context.getAsArrayType(Type))
+ return CheckValueInitialization(AT->getElementType(), Loc);
+
+ if (const RecordType *RT = Type->getAsRecordType()) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // -- if T is a class type (clause 9) with a user-declared
+ // constructor (12.1), then the default constructor for T is
+ // called (and the initialization is ill-formed if T has no
+ // accessible default constructor);
+ if (ClassDecl->hasUserDeclaredConstructor())
+ // FIXME: Eventually, we'll need to put the constructor decl into the
+ // AST.
+ return PerformInitializationByConstructor(Type, 0, 0, Loc,
+ SourceRange(Loc),
+ DeclarationName(),
+ IK_Direct);
+ }
+ }
+
+ if (Type->isReferenceType()) {
+ // C++ [dcl.init]p5:
+ // [...] A program that calls for default-initialization or
+ // value-initialization of an entity of reference type is
+ // ill-formed. [...]
+ // FIXME: Once we have code that goes through this path, add an actual
+ // diagnostic :)
+ }
+
+ return false;
+}
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
new file mode 100644
index 0000000..6212449
--- /dev/null
+++ b/lib/Sema/SemaLookup.cpp
@@ -0,0 +1,1626 @@
+//===--------------------- SemaLookup.cpp - Name Lookup ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements name lookup for C, C++, Objective-C, and
+// Objective-C++.
+//
+//===----------------------------------------------------------------------===//
+#include "Sema.h"
+#include "SemaInherit.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <set>
+#include <vector>
+#include <iterator>
+#include <utility>
+#include <algorithm>
+
+using namespace clang;
+
+typedef llvm::SmallVector<UsingDirectiveDecl*, 4> UsingDirectivesTy;
+typedef llvm::DenseSet<NamespaceDecl*> NamespaceSet;
+typedef llvm::SmallVector<Sema::LookupResult, 3> LookupResultsTy;
+
+/// UsingDirAncestorCompare - Implements strict weak ordering of
+/// UsingDirectives. It orders them by the address of their common ancestor.
+struct UsingDirAncestorCompare {
+
+ /// @brief Compares UsingDirectiveDecl common ancestor with DeclContext.
+ bool operator () (UsingDirectiveDecl *U, const DeclContext *Ctx) const {
+ return U->getCommonAncestor() < Ctx;
+ }
+
+ /// @brief Compares UsingDirectiveDecl common ancestor with DeclContext.
+ bool operator () (const DeclContext *Ctx, UsingDirectiveDecl *U) const {
+ return Ctx < U->getCommonAncestor();
+ }
+
+ /// @brief Compares UsingDirectiveDecl common ancestors.
+ bool operator () (UsingDirectiveDecl *U1, UsingDirectiveDecl *U2) const {
+ return U1->getCommonAncestor() < U2->getCommonAncestor();
+ }
+};
+
+/// AddNamespaceUsingDirectives - Adds to the heap UDirs (ordered by common
+/// ancestor) all UsingDirectiveDecls found in namespace NS, including those
+/// found (recursively) in the namespaces they nominate.
+void AddNamespaceUsingDirectives(ASTContext &Context,
+ DeclContext *NS,
+ UsingDirectivesTy &UDirs,
+ NamespaceSet &Visited) {
+ DeclContext::udir_iterator I, End;
+
+ for (llvm::tie(I, End) = NS->getUsingDirectives(Context); I !=End; ++I) {
+ UDirs.push_back(*I);
+ std::push_heap(UDirs.begin(), UDirs.end(), UsingDirAncestorCompare());
+ NamespaceDecl *Nominated = (*I)->getNominatedNamespace();
+ if (Visited.insert(Nominated).second)
+ AddNamespaceUsingDirectives(Context, Nominated, UDirs, /*ref*/ Visited);
+ }
+}
+
+/// AddScopeUsingDirectives - Adds all UsingDirectiveDecl's found in Scope S,
+/// including all found in the namespaces they nominate.
+static void AddScopeUsingDirectives(ASTContext &Context, Scope *S,
+ UsingDirectivesTy &UDirs) {
+ NamespaceSet VisitedNS;
+
+ if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity())) {
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(Ctx))
+ VisitedNS.insert(NS);
+
+ AddNamespaceUsingDirectives(Context, Ctx, UDirs, /*ref*/ VisitedNS);
+
+ } else {
+ Scope::udir_iterator I = S->using_directives_begin(),
+ End = S->using_directives_end();
+
+ for (; I != End; ++I) {
+ UsingDirectiveDecl *UD = I->getAs<UsingDirectiveDecl>();
+ UDirs.push_back(UD);
+ std::push_heap(UDirs.begin(), UDirs.end(), UsingDirAncestorCompare());
+
+ NamespaceDecl *Nominated = UD->getNominatedNamespace();
+ if (!VisitedNS.count(Nominated)) {
+ VisitedNS.insert(Nominated);
+ AddNamespaceUsingDirectives(Context, Nominated, UDirs,
+ /*ref*/ VisitedNS);
+ }
+ }
+ }
+}
+
+/// MaybeConstructOverloadSet - Name lookup has determined that the
+/// elements in [I, IEnd) have the name that we are looking for, and
+/// *I is a match for the namespace. This routine returns an
+/// appropriate Decl for name lookup, which may either be *I or an
+/// OverloadedFunctionDecl that represents the overloaded functions in
+/// [I, IEnd).
+///
+/// The existence of this routine is temporary; users of LookupResult
+/// should be able to handle multiple results, to deal with cases of
+/// ambiguity and overloaded functions without needing to create a
+/// Decl node.
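+///
+/// For example (an illustrative sketch; 'f' is a hypothetical name):
+/// @code
+/// void f(int);
+/// void f(double);
+/// @endcode
+/// An unqualified lookup of 'f' yields both FunctionDecls; this routine
+/// wraps them in a single OverloadedFunctionDecl.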
+template<typename DeclIterator>
+static NamedDecl *
+MaybeConstructOverloadSet(ASTContext &Context,
+ DeclIterator I, DeclIterator IEnd) {
+ assert(I != IEnd && "Iterator range cannot be empty");
+ assert(!isa<OverloadedFunctionDecl>(*I) &&
+ "Cannot have an overloaded function");
+
+ if (isa<FunctionDecl>(*I)) {
+ // If we found a function, there might be more functions. If
+ // so, collect them into an overload set.
+ DeclIterator Last = I;
+ OverloadedFunctionDecl *Ovl = 0;
+ for (++Last; Last != IEnd && isa<FunctionDecl>(*Last); ++Last) {
+ if (!Ovl) {
+ // FIXME: We leak this overload set. Eventually, we want to stop
+ // building the declarations for these overload sets, so there will be
+ // nothing to leak.
+ Ovl = OverloadedFunctionDecl::Create(Context, (*I)->getDeclContext(),
+ (*I)->getDeclName());
+ Ovl->addOverload(cast<FunctionDecl>(*I));
+ }
+ Ovl->addOverload(cast<FunctionDecl>(*Last));
+ }
+
+ // If we had more than one function, we built an overload
+ // set. Return it.
+ if (Ovl)
+ return Ovl;
+ }
+
+ return *I;
+}
+
+/// Merges together multiple LookupResults dealing with duplicated Decl's.
+static Sema::LookupResult
+MergeLookupResults(ASTContext &Context, LookupResultsTy &Results) {
+ typedef Sema::LookupResult LResult;
+ typedef llvm::SmallPtrSet<NamedDecl*, 4> DeclsSetTy;
+
+  // Remove duplicate results that point at the same Decl by storing them
+  // in an associative collection. This can be the case for code like:
+ //
+ // namespace A { int i; }
+ // namespace B { using namespace A; }
+ // namespace C { using namespace A; }
+ //
+ // void foo() {
+ // using namespace B;
+ // using namespace C;
+ // ++i; // finds A::i, from both namespace B and C at global scope
+ // }
+ //
+ // C++ [namespace.qual].p3:
+ // The same declaration found more than once is not an ambiguity
+ // (because it is still a unique declaration).
+ DeclsSetTy FoundDecls;
+
+  // Counters of tag names, functions, and ordinary non-function names,
+  // used for resolving ambiguity and name hiding.
+ std::size_t TagNames = 0, Functions = 0, OrdinaryNonFunc = 0;
+
+ LookupResultsTy::iterator I = Results.begin(), End = Results.end();
+
+ // No name lookup results, return early.
+ if (I == End) return LResult::CreateLookupResult(Context, 0);
+
+ // Keep track of the tag declaration we found. We only use this if
+ // we find a single tag declaration.
+ TagDecl *TagFound = 0;
+
+ for (; I != End; ++I) {
+ switch (I->getKind()) {
+ case LResult::NotFound:
+ assert(false &&
+             "Name lookup result should always be successful here.");
+ break;
+
+ case LResult::AmbiguousReference:
+ case LResult::AmbiguousBaseSubobjectTypes:
+ case LResult::AmbiguousBaseSubobjects:
+ assert(false && "Shouldn't get ambiguous lookup here.");
+ break;
+
+ case LResult::Found: {
+ NamedDecl *ND = I->getAsDecl();
+ if (TagDecl *TD = dyn_cast<TagDecl>(ND)) {
+ TagFound = Context.getCanonicalDecl(TD);
+ TagNames += FoundDecls.insert(TagFound)? 1 : 0;
+ } else if (isa<FunctionDecl>(ND))
+ Functions += FoundDecls.insert(ND)? 1 : 0;
+ else
+ FoundDecls.insert(ND);
+ break;
+ }
+
+ case LResult::FoundOverloaded:
+ for (LResult::iterator FI = I->begin(), FEnd = I->end(); FI != FEnd; ++FI)
+ Functions += FoundDecls.insert(*FI)? 1 : 0;
+ break;
+ }
+ }
+ OrdinaryNonFunc = FoundDecls.size() - TagNames - Functions;
+ bool Ambiguous = false, NameHidesTags = false;
+
+ if (FoundDecls.size() == 1) {
+ // 1) Exactly one result.
+ } else if (TagNames > 1) {
+ // 2) Multiple tag names (even though they may be hidden by an
+ // object name).
+ Ambiguous = true;
+ } else if (FoundDecls.size() - TagNames == 1) {
+ // 3) Ordinary name hides (optional) tag.
+ NameHidesTags = TagFound;
+ } else if (Functions) {
+ // C++ [basic.lookup].p1:
+ // ... Name lookup may associate more than one declaration with
+ // a name if it finds the name to be a function name; the declarations
+ // are said to form a set of overloaded functions (13.1).
+ // Overload resolution (13.3) takes place after name lookup has succeeded.
+ //
+ if (!OrdinaryNonFunc) {
+ // 4) Functions hide tag names.
+ NameHidesTags = TagFound;
+ } else {
+ // 5) Functions + ordinary names.
+ Ambiguous = true;
+ }
+ } else {
+ // 6) Multiple non-tag names
+ Ambiguous = true;
+ }
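+  // For example (illustrative), given
+  //   struct stat {};
+  //   int stat();
+  // an unqualified lookup of 'stat' finds the function, which hides the
+  // tag (case 4 above).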
+
+ if (Ambiguous)
+ return LResult::CreateLookupResult(Context,
+ FoundDecls.begin(), FoundDecls.size());
+ if (NameHidesTags) {
+ // There's only one tag, TagFound. Remove it.
+ assert(TagFound && FoundDecls.count(TagFound) && "No tag name found?");
+ FoundDecls.erase(TagFound);
+ }
+
+ // Return successful name lookup result.
+ return LResult::CreateLookupResult(Context,
+ MaybeConstructOverloadSet(Context,
+ FoundDecls.begin(),
+ FoundDecls.end()));
+}
+
+// Retrieve the set of identifier namespaces that correspond to a
+// specific kind of name lookup.
+inline unsigned
+getIdentifierNamespacesFromLookupNameKind(Sema::LookupNameKind NameKind,
+ bool CPlusPlus) {
+ unsigned IDNS = 0;
+ switch (NameKind) {
+ case Sema::LookupOrdinaryName:
+ case Sema::LookupOperatorName:
+ case Sema::LookupRedeclarationWithLinkage:
+ IDNS = Decl::IDNS_Ordinary;
+ if (CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member;
+ break;
+
+ case Sema::LookupTagName:
+ IDNS = Decl::IDNS_Tag;
+ break;
+
+ case Sema::LookupMemberName:
+ IDNS = Decl::IDNS_Member;
+ if (CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Ordinary;
+ break;
+
+ case Sema::LookupNestedNameSpecifierName:
+ case Sema::LookupNamespaceName:
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member;
+ break;
+
+ case Sema::LookupObjCProtocolName:
+ IDNS = Decl::IDNS_ObjCProtocol;
+ break;
+
+ case Sema::LookupObjCImplementationName:
+ IDNS = Decl::IDNS_ObjCImplementation;
+ break;
+
+ case Sema::LookupObjCCategoryImplName:
+ IDNS = Decl::IDNS_ObjCCategoryImpl;
+ break;
+ }
+ return IDNS;
+}
+
+Sema::LookupResult
+Sema::LookupResult::CreateLookupResult(ASTContext &Context, NamedDecl *D) {
+ if (ObjCCompatibleAliasDecl *Alias
+ = dyn_cast_or_null<ObjCCompatibleAliasDecl>(D))
+ D = Alias->getClassInterface();
+
+ LookupResult Result;
+ Result.StoredKind = (D && isa<OverloadedFunctionDecl>(D))?
+ OverloadedDeclSingleDecl : SingleDecl;
+ Result.First = reinterpret_cast<uintptr_t>(D);
+ Result.Last = 0;
+ Result.Context = &Context;
+ return Result;
+}
+
+/// @brief Creates a LookupResult from a range of declarations found by the
+/// identifier resolver.
+Sema::LookupResult
+Sema::LookupResult::CreateLookupResult(ASTContext &Context,
+ IdentifierResolver::iterator F,
+ IdentifierResolver::iterator L) {
+ LookupResult Result;
+ Result.Context = &Context;
+
+ if (F != L && isa<FunctionDecl>(*F)) {
+ IdentifierResolver::iterator Next = F;
+ ++Next;
+ if (Next != L && isa<FunctionDecl>(*Next)) {
+ Result.StoredKind = OverloadedDeclFromIdResolver;
+ Result.First = F.getAsOpaqueValue();
+ Result.Last = L.getAsOpaqueValue();
+ return Result;
+ }
+ }
+
+ Decl *D = *F;
+ if (ObjCCompatibleAliasDecl *Alias
+ = dyn_cast_or_null<ObjCCompatibleAliasDecl>(D))
+ D = Alias->getClassInterface();
+
+ Result.StoredKind = SingleDecl;
+ Result.First = reinterpret_cast<uintptr_t>(D);
+ Result.Last = 0;
+ return Result;
+}
+
+Sema::LookupResult
+Sema::LookupResult::CreateLookupResult(ASTContext &Context,
+ DeclContext::lookup_iterator F,
+ DeclContext::lookup_iterator L) {
+ LookupResult Result;
+ Result.Context = &Context;
+
+ if (F != L && isa<FunctionDecl>(*F)) {
+ DeclContext::lookup_iterator Next = F;
+ ++Next;
+ if (Next != L && isa<FunctionDecl>(*Next)) {
+ Result.StoredKind = OverloadedDeclFromDeclContext;
+ Result.First = reinterpret_cast<uintptr_t>(F);
+ Result.Last = reinterpret_cast<uintptr_t>(L);
+ return Result;
+ }
+ }
+
+ Decl *D = *F;
+ if (ObjCCompatibleAliasDecl *Alias
+ = dyn_cast_or_null<ObjCCompatibleAliasDecl>(D))
+ D = Alias->getClassInterface();
+
+ Result.StoredKind = SingleDecl;
+ Result.First = reinterpret_cast<uintptr_t>(D);
+ Result.Last = 0;
+ return Result;
+}
+
+/// @brief Determine the result of name lookup.
+Sema::LookupResult::LookupKind Sema::LookupResult::getKind() const {
+ switch (StoredKind) {
+ case SingleDecl:
+ return (reinterpret_cast<Decl *>(First) != 0)? Found : NotFound;
+
+ case OverloadedDeclSingleDecl:
+ case OverloadedDeclFromIdResolver:
+ case OverloadedDeclFromDeclContext:
+ return FoundOverloaded;
+
+ case AmbiguousLookupStoresBasePaths:
+ return Last? AmbiguousBaseSubobjectTypes : AmbiguousBaseSubobjects;
+
+ case AmbiguousLookupStoresDecls:
+ return AmbiguousReference;
+ }
+
+ // We can't ever get here.
+ return NotFound;
+}
+
+/// @brief Converts the result of name lookup into a single (possibly
+/// NULL) pointer to a declaration.
+///
+/// The resulting declaration will either be the declaration we found
+/// (if only a single declaration was found), an
+/// OverloadedFunctionDecl (if an overloaded function was found), or
+/// NULL (if no declaration was found). This conversion must not be
+/// used anywhere where name lookup could result in an ambiguity.
+///
+/// The OverloadedFunctionDecl conversion is meant as a stop-gap
+/// solution, since it causes the OverloadedFunctionDecl to be
+/// leaked. FIXME: Eventually, there will be a better way to iterate
+/// over the set of overloaded functions returned by name lookup.
+NamedDecl *Sema::LookupResult::getAsDecl() const {
+ switch (StoredKind) {
+ case SingleDecl:
+ return reinterpret_cast<NamedDecl *>(First);
+
+ case OverloadedDeclFromIdResolver:
+ return MaybeConstructOverloadSet(*Context,
+ IdentifierResolver::iterator::getFromOpaqueValue(First),
+ IdentifierResolver::iterator::getFromOpaqueValue(Last));
+
+ case OverloadedDeclFromDeclContext:
+ return MaybeConstructOverloadSet(*Context,
+ reinterpret_cast<DeclContext::lookup_iterator>(First),
+ reinterpret_cast<DeclContext::lookup_iterator>(Last));
+
+ case OverloadedDeclSingleDecl:
+ return reinterpret_cast<OverloadedFunctionDecl*>(First);
+
+ case AmbiguousLookupStoresDecls:
+ case AmbiguousLookupStoresBasePaths:
+ assert(false &&
+ "Name lookup returned an ambiguity that could not be handled");
+ break;
+ }
+
+ return 0;
+}
+
+/// @brief Retrieves the BasePaths structure describing an ambiguous
+/// name lookup, or null.
+BasePaths *Sema::LookupResult::getBasePaths() const {
+ if (StoredKind == AmbiguousLookupStoresBasePaths)
+ return reinterpret_cast<BasePaths *>(First);
+ return 0;
+}
+
+Sema::LookupResult::iterator::reference
+Sema::LookupResult::iterator::operator*() const {
+ switch (Result->StoredKind) {
+ case SingleDecl:
+ return reinterpret_cast<NamedDecl*>(Current);
+
+ case OverloadedDeclSingleDecl:
+ return *reinterpret_cast<NamedDecl**>(Current);
+
+ case OverloadedDeclFromIdResolver:
+ return *IdentifierResolver::iterator::getFromOpaqueValue(Current);
+
+ case AmbiguousLookupStoresBasePaths:
+ if (Result->Last)
+ return *reinterpret_cast<NamedDecl**>(Current);
+
+ // Fall through to handle the DeclContext::lookup_iterator we're
+ // storing.
+
+ case OverloadedDeclFromDeclContext:
+ case AmbiguousLookupStoresDecls:
+ return *reinterpret_cast<DeclContext::lookup_iterator>(Current);
+ }
+
+ return 0;
+}
+
+Sema::LookupResult::iterator& Sema::LookupResult::iterator::operator++() {
+ switch (Result->StoredKind) {
+ case SingleDecl:
+ Current = reinterpret_cast<uintptr_t>((NamedDecl*)0);
+ break;
+
+ case OverloadedDeclSingleDecl: {
+ NamedDecl ** I = reinterpret_cast<NamedDecl**>(Current);
+ ++I;
+ Current = reinterpret_cast<uintptr_t>(I);
+ break;
+ }
+
+ case OverloadedDeclFromIdResolver: {
+ IdentifierResolver::iterator I
+ = IdentifierResolver::iterator::getFromOpaqueValue(Current);
+ ++I;
+ Current = I.getAsOpaqueValue();
+ break;
+ }
+
+ case AmbiguousLookupStoresBasePaths:
+ if (Result->Last) {
+ NamedDecl ** I = reinterpret_cast<NamedDecl**>(Current);
+ ++I;
+ Current = reinterpret_cast<uintptr_t>(I);
+ break;
+ }
+ // Fall through to handle the DeclContext::lookup_iterator we're
+ // storing.
+
+ case OverloadedDeclFromDeclContext:
+ case AmbiguousLookupStoresDecls: {
+ DeclContext::lookup_iterator I
+ = reinterpret_cast<DeclContext::lookup_iterator>(Current);
+ ++I;
+ Current = reinterpret_cast<uintptr_t>(I);
+ break;
+ }
+ }
+
+ return *this;
+}
+
+Sema::LookupResult::iterator Sema::LookupResult::begin() {
+ switch (StoredKind) {
+ case SingleDecl:
+ case OverloadedDeclFromIdResolver:
+ case OverloadedDeclFromDeclContext:
+ case AmbiguousLookupStoresDecls:
+ return iterator(this, First);
+
+ case OverloadedDeclSingleDecl: {
+ OverloadedFunctionDecl * Ovl =
+ reinterpret_cast<OverloadedFunctionDecl*>(First);
+ return iterator(this,
+ reinterpret_cast<uintptr_t>(&(*Ovl->function_begin())));
+ }
+
+ case AmbiguousLookupStoresBasePaths:
+ if (Last)
+ return iterator(this,
+ reinterpret_cast<uintptr_t>(getBasePaths()->found_decls_begin()));
+ else
+ return iterator(this,
+ reinterpret_cast<uintptr_t>(getBasePaths()->front().Decls.first));
+ }
+
+ // Required to suppress GCC warning.
+ return iterator();
+}
+
+Sema::LookupResult::iterator Sema::LookupResult::end() {
+ switch (StoredKind) {
+ case SingleDecl:
+ case OverloadedDeclFromIdResolver:
+ case OverloadedDeclFromDeclContext:
+ case AmbiguousLookupStoresDecls:
+ return iterator(this, Last);
+
+ case OverloadedDeclSingleDecl: {
+ OverloadedFunctionDecl * Ovl =
+ reinterpret_cast<OverloadedFunctionDecl*>(First);
+ return iterator(this,
+ reinterpret_cast<uintptr_t>(&(*Ovl->function_end())));
+ }
+
+ case AmbiguousLookupStoresBasePaths:
+ if (Last)
+ return iterator(this,
+ reinterpret_cast<uintptr_t>(getBasePaths()->found_decls_end()));
+ else
+ return iterator(this, reinterpret_cast<uintptr_t>(
+ getBasePaths()->front().Decls.second));
+ }
+
+ // Required to suppress GCC warning.
+ return iterator();
+}
+
+void Sema::LookupResult::Destroy() {
+ if (BasePaths *Paths = getBasePaths())
+ delete Paths;
+ else if (getKind() == AmbiguousReference)
+ delete[] reinterpret_cast<NamedDecl **>(First);
+}
+
+static void
+CppNamespaceLookup(ASTContext &Context, DeclContext *NS,
+ DeclarationName Name, Sema::LookupNameKind NameKind,
+ unsigned IDNS, LookupResultsTy &Results,
+ UsingDirectivesTy *UDirs = 0) {
+
+ assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!");
+
+ // Perform qualified name lookup into the LookupCtx.
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I, E) = NS->lookup(Context, Name); I != E; ++I)
+ if (Sema::isAcceptableLookupResult(*I, NameKind, IDNS)) {
+ Results.push_back(Sema::LookupResult::CreateLookupResult(Context, I, E));
+ break;
+ }
+
+ if (UDirs) {
+    // For each UsingDirectiveDecl whose common ancestor is equal to NS,
+    // we perform qualified name lookup into the namespace it nominates.
+ UsingDirectivesTy::const_iterator UI, UEnd;
+ llvm::tie(UI, UEnd) =
+ std::equal_range(UDirs->begin(), UDirs->end(), NS,
+ UsingDirAncestorCompare());
+
+ for (; UI != UEnd; ++UI)
+ CppNamespaceLookup(Context, (*UI)->getNominatedNamespace(),
+ Name, NameKind, IDNS, Results);
+ }
+}
+
+static bool isNamespaceOrTranslationUnitScope(Scope *S) {
+ if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity()))
+ return Ctx->isFileContext();
+ return false;
+}
+
+std::pair<bool, Sema::LookupResult>
+Sema::CppLookupName(Scope *S, DeclarationName Name,
+ LookupNameKind NameKind, bool RedeclarationOnly) {
+ assert(getLangOptions().CPlusPlus &&
+ "Can perform only C++ lookup");
+ unsigned IDNS
+ = getIdentifierNamespacesFromLookupNameKind(NameKind, /*CPlusPlus*/ true);
+ Scope *Initial = S;
+ DeclContext *OutOfLineCtx = 0;
+ IdentifierResolver::iterator
+ I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+
+  // First we look up names in the local scope.
+ // We don't consider using-directives, as per 7.3.4.p1 [namespace.udir]
+ // ...During unqualified name lookup (3.4.1), the names appear as if
+ // they were declared in the nearest enclosing namespace which contains
+ // both the using-directive and the nominated namespace.
+ // [Note: in this context, “contains” means “contains directly or
+ // indirectly”.
+ //
+ // For example:
+ // namespace A { int i; }
+ // void foo() {
+ // int i;
+ // {
+ // using namespace A;
+ // ++i; // finds local 'i', A::i appears at global scope
+ // }
+ // }
+ //
+ for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) {
+ // Check whether the IdResolver has anything in this scope.
+ for (; I != IEnd && S->isDeclScope(DeclPtrTy::make(*I)); ++I) {
+ if (isAcceptableLookupResult(*I, NameKind, IDNS)) {
+ // We found something. Look for anything else in our scope
+ // with this same name and in an acceptable identifier
+ // namespace, so that we can construct an overload set if we
+ // need to.
+ IdentifierResolver::iterator LastI = I;
+ for (++LastI; LastI != IEnd; ++LastI) {
+ if (!S->isDeclScope(DeclPtrTy::make(*LastI)))
+ break;
+ }
+ LookupResult Result =
+ LookupResult::CreateLookupResult(Context, I, LastI);
+ return std::make_pair(true, Result);
+ }
+ }
+ if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity())) {
+ LookupResult R;
+ // Perform member lookup into struct.
+ // FIXME: In some cases, we know that every name that could be found by
+ // this qualified name lookup will also be on the identifier chain. For
+ // example, inside a class without any base classes, we never need to
+ // perform qualified lookup because all of the members are on top of the
+ // identifier chain.
+ if (isa<RecordDecl>(Ctx)) {
+ R = LookupQualifiedName(Ctx, Name, NameKind, RedeclarationOnly);
+ if (R || RedeclarationOnly)
+ return std::make_pair(true, R);
+ }
+ if (Ctx->getParent() != Ctx->getLexicalParent()
+ || isa<CXXMethodDecl>(Ctx)) {
+        // This is an out-of-line definition of a C++ method or struct, so we
+        // continue doing name lookup in the parent context. Once we reach a
+        // namespace or the translation unit, we save it so that
+        // using-directives can be checked later.
+ for (OutOfLineCtx = Ctx; OutOfLineCtx && !OutOfLineCtx->isFileContext();
+ OutOfLineCtx = OutOfLineCtx->getParent()) {
+ R = LookupQualifiedName(OutOfLineCtx, Name, NameKind, RedeclarationOnly);
+ if (R || RedeclarationOnly)
+ return std::make_pair(true, R);
+ }
+ }
+ }
+ }
+
+  // Collect UsingDirectiveDecls in all scopes and, recursively, in all
+  // namespaces nominated by those using-directives. The using-directives
+  // are pushed onto a heap, ordered by the pointer value of their common
+  // ancestor.
+ // FIXME: Cache this sorted list in Scope structure, and DeclContext, so we
+ // don't build it for each lookup!
+ UsingDirectivesTy UDirs;
+ for (Scope *SC = Initial; SC; SC = SC->getParent())
+ if (SC->getFlags() & Scope::DeclScope)
+ AddScopeUsingDirectives(Context, SC, UDirs);
+
+ // Sort heapified UsingDirectiveDecls.
+ std::sort_heap(UDirs.begin(), UDirs.end(), UsingDirAncestorCompare());
+
+ // Lookup namespace scope, and global scope.
+ // Unqualified name lookup in C++ requires looking into scopes
+ // that aren't strictly lexical, and therefore we walk through the
+ // context as well as walking through the scopes.
+
+ LookupResultsTy LookupResults;
+ assert((!OutOfLineCtx || OutOfLineCtx->isFileContext()) &&
+ "We should have been looking only at file context here already.");
+ bool LookedInCtx = false;
+ LookupResult Result;
+ while (OutOfLineCtx &&
+ OutOfLineCtx != S->getEntity() &&
+ OutOfLineCtx->isNamespace()) {
+ LookedInCtx = true;
+
+ // Look into context considering using-directives.
+ CppNamespaceLookup(Context, OutOfLineCtx, Name, NameKind, IDNS,
+ LookupResults, &UDirs);
+
+ if ((Result = MergeLookupResults(Context, LookupResults)) ||
+ (RedeclarationOnly && !OutOfLineCtx->isTransparentContext()))
+ return std::make_pair(true, Result);
+
+ OutOfLineCtx = OutOfLineCtx->getParent();
+ }
+
+ for (; S; S = S->getParent()) {
+ DeclContext *Ctx = static_cast<DeclContext *>(S->getEntity());
+ assert(Ctx && Ctx->isFileContext() &&
+ "We should have been looking only at file context here already.");
+
+ // Check whether the IdResolver has anything in this scope.
+ for (; I != IEnd && S->isDeclScope(DeclPtrTy::make(*I)); ++I) {
+ if (isAcceptableLookupResult(*I, NameKind, IDNS)) {
+ // We found something. Look for anything else in our scope
+ // with this same name and in an acceptable identifier
+ // namespace, so that we can construct an overload set if we
+ // need to.
+ IdentifierResolver::iterator LastI = I;
+ for (++LastI; LastI != IEnd; ++LastI) {
+ if (!S->isDeclScope(DeclPtrTy::make(*LastI)))
+ break;
+ }
+
+        // We store the name lookup result and continue looking into the
+        // associated context and, possibly, the namespaces nominated by
+        // using-directives.
+ LookupResults.push_back(
+ LookupResult::CreateLookupResult(Context, I, LastI));
+ break;
+ }
+ }
+
+ LookedInCtx = true;
+ // Look into context considering using-directives.
+ CppNamespaceLookup(Context, Ctx, Name, NameKind, IDNS,
+ LookupResults, &UDirs);
+
+ if ((Result = MergeLookupResults(Context, LookupResults)) ||
+ (RedeclarationOnly && !Ctx->isTransparentContext()))
+ return std::make_pair(true, Result);
+ }
+
+ if (!(LookedInCtx || LookupResults.empty())) {
+    // We didn't perform lookup in any Scope entity, so we return the
+    // result from the IdentifierResolver.
+ assert((LookupResults.size() == 1) && "Wrong size!");
+ return std::make_pair(true, LookupResults.front());
+ }
+ return std::make_pair(false, LookupResult());
+}
+
+/// @brief Perform unqualified name lookup starting from a given
+/// scope.
+///
+/// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is
+/// used to find names within the current scope. For example, 'x' in
+/// @code
+/// int x;
+/// int f() {
+///   return x; // unqualified name lookup finds 'x' in the global scope
+/// }
+/// @endcode
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin. If the lookup criteria permits, name lookup may also search
+/// in the parent scopes.
+///
+/// @param Name The name of the entity that we are searching for.
+///
+/// @param Loc If provided, the source location where we're performing
+/// name lookup. At present, this is only used to produce diagnostics when
+/// C library functions (like "malloc") are implicitly declared.
+///
+/// @returns The result of name lookup, which includes zero or more
+/// declarations and possibly additional information used to diagnose
+/// ambiguities.
+Sema::LookupResult
+Sema::LookupName(Scope *S, DeclarationName Name, LookupNameKind NameKind,
+ bool RedeclarationOnly, bool AllowBuiltinCreation,
+ SourceLocation Loc) {
+ if (!Name) return LookupResult::CreateLookupResult(Context, 0);
+
+ if (!getLangOptions().CPlusPlus) {
+ // Unqualified name lookup in C/Objective-C is purely lexical, so
+ // search in the declarations attached to the name.
+ unsigned IDNS = 0;
+ switch (NameKind) {
+ case Sema::LookupOrdinaryName:
+ IDNS = Decl::IDNS_Ordinary;
+ break;
+
+ case Sema::LookupTagName:
+ IDNS = Decl::IDNS_Tag;
+ break;
+
+ case Sema::LookupMemberName:
+ IDNS = Decl::IDNS_Member;
+ break;
+
+ case Sema::LookupOperatorName:
+ case Sema::LookupNestedNameSpecifierName:
+ case Sema::LookupNamespaceName:
+ assert(false && "C does not perform these kinds of name lookup");
+ break;
+
+ case Sema::LookupRedeclarationWithLinkage:
+ // Find the nearest non-transparent declaration scope.
+ while (!(S->getFlags() & Scope::DeclScope) ||
+ (S->getEntity() &&
+ static_cast<DeclContext *>(S->getEntity())
+ ->isTransparentContext()))
+ S = S->getParent();
+ IDNS = Decl::IDNS_Ordinary;
+ break;
+
+ case Sema::LookupObjCProtocolName:
+ IDNS = Decl::IDNS_ObjCProtocol;
+ break;
+
+ case Sema::LookupObjCImplementationName:
+ IDNS = Decl::IDNS_ObjCImplementation;
+ break;
+
+ case Sema::LookupObjCCategoryImplName:
+ IDNS = Decl::IDNS_ObjCCategoryImpl;
+ break;
+ }
+
+ // Scan up the scope chain looking for a decl that matches this
+ // identifier that is in the appropriate namespace. This search
+ // should not take long, as shadowing of names is uncommon, and
+ // deep shadowing is extremely uncommon.
+ bool LeftStartingScope = false;
+
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I)
+ if ((*I)->isInIdentifierNamespace(IDNS)) {
+ if (NameKind == LookupRedeclarationWithLinkage) {
+ // Determine whether this (or a previous) declaration is
+ // out-of-scope.
+ if (!LeftStartingScope && !S->isDeclScope(DeclPtrTy::make(*I)))
+ LeftStartingScope = true;
+
+ // If we found something outside of our starting scope that
+ // does not have linkage, skip it.
+ if (LeftStartingScope && !((*I)->hasLinkage()))
+ continue;
+ }
+
+ if ((*I)->getAttr<OverloadableAttr>()) {
+ // If this declaration has the "overloadable" attribute, we
+ // might have a set of overloaded functions.
+
+ // Figure out what scope the identifier is in.
+ while (!(S->getFlags() & Scope::DeclScope) ||
+ !S->isDeclScope(DeclPtrTy::make(*I)))
+ S = S->getParent();
+
+ // Find the last declaration in this scope (with the same
+ // name, naturally).
+ IdentifierResolver::iterator LastI = I;
+ for (++LastI; LastI != IEnd; ++LastI) {
+ if (!S->isDeclScope(DeclPtrTy::make(*LastI)))
+ break;
+ }
+
+ return LookupResult::CreateLookupResult(Context, I, LastI);
+ }
+
+ // We have a single lookup result.
+ return LookupResult::CreateLookupResult(Context, *I);
+ }
+ } else {
+ // Perform C++ unqualified name lookup.
+ std::pair<bool, LookupResult> MaybeResult =
+ CppLookupName(S, Name, NameKind, RedeclarationOnly);
+ if (MaybeResult.first)
+ return MaybeResult.second;
+ }
+
+ // If we didn't find a use of this identifier, and if the identifier
+ // corresponds to a compiler builtin, create the decl object for the builtin
+ // now, injecting it into translation unit scope, and return it.
+ if (NameKind == LookupOrdinaryName ||
+ NameKind == LookupRedeclarationWithLinkage) {
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ if (II && AllowBuiltinCreation) {
+ // If this is a builtin on this (or all) targets, create the decl.
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ // In C++, we don't have any predefined library functions like
+ // 'malloc'. Instead, we'll just error.
+ if (getLangOptions().CPlusPlus &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return LookupResult::CreateLookupResult(Context, 0);
+
+ return LookupResult::CreateLookupResult(Context,
+ LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID,
+ S, RedeclarationOnly, Loc));
+ }
+ }
+ }
+ return LookupResult::CreateLookupResult(Context, 0);
+}
+
+/// @brief Perform qualified name lookup into a given context.
+///
+/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
+/// names when the context of those names is explicitly specified, e.g.,
+/// "std::vector" or "x->member".
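+///
+/// For example (an illustrative sketch; N, S, and x are hypothetical names):
+/// @code
+/// namespace N { struct S { static int x; }; }
+/// int y = N::S::x;  // qualified lookup of 'x' into N::S
+/// @endcode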
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// @param LookupCtx The context in which qualified name lookup will
+/// search. If the lookup criteria permits, name lookup may also search
+/// in the parent contexts or (for C++ classes) base classes.
+///
+/// @param Name The name of the entity that we are searching for.
+///
+/// @param Criteria The criteria that this routine will use to
+/// determine which names are visible and which names will be
+/// found. Note that name lookup will find a name that is visible by
+/// the given criteria, but the entity itself may not be semantically
+/// correct or even the kind of entity expected based on the
+/// lookup. For example, searching for a nested-name-specifier name
+/// might result in an EnumDecl, which is visible but is not permitted
+/// as a nested-name-specifier in C++03.
+///
+/// @returns The result of name lookup, which includes zero or more
+/// declarations and possibly additional information used to diagnose
+/// ambiguities.
+Sema::LookupResult
+Sema::LookupQualifiedName(DeclContext *LookupCtx, DeclarationName Name,
+ LookupNameKind NameKind, bool RedeclarationOnly) {
+ assert(LookupCtx && "Sema::LookupQualifiedName requires a lookup context");
+
+ if (!Name) return LookupResult::CreateLookupResult(Context, 0);
+
+ // If we're performing qualified name lookup (e.g., lookup into a
+ // struct), find fields as part of ordinary name lookup.
+ unsigned IDNS
+ = getIdentifierNamespacesFromLookupNameKind(NameKind,
+ getLangOptions().CPlusPlus);
+ if (NameKind == LookupOrdinaryName)
+ IDNS |= Decl::IDNS_Member;
+
+ // Perform qualified name lookup into the LookupCtx.
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I, E) = LookupCtx->lookup(Context, Name); I != E; ++I)
+ if (isAcceptableLookupResult(*I, NameKind, IDNS))
+ return LookupResult::CreateLookupResult(Context, I, E);
+
+ // If this isn't a C++ class or we aren't allowed to look into base
+ // classes, we're done.
+ if (RedeclarationOnly || !isa<CXXRecordDecl>(LookupCtx))
+ return LookupResult::CreateLookupResult(Context, 0);
+
+ // Perform lookup into our base classes.
+ BasePaths Paths;
+ Paths.setOrigin(Context.getTypeDeclType(cast<RecordDecl>(LookupCtx)));
+
+ // Look for this member in our base classes
+ if (!LookupInBases(cast<CXXRecordDecl>(LookupCtx),
+ MemberLookupCriteria(Name, NameKind, IDNS), Paths))
+ return LookupResult::CreateLookupResult(Context, 0);
+
+ // C++ [class.member.lookup]p2:
+ // [...] If the resulting set of declarations are not all from
+ // sub-objects of the same type, or the set has a nonstatic member
+ // and includes members from distinct sub-objects, there is an
+ // ambiguity and the program is ill-formed. Otherwise that set is
+ // the result of the lookup.
+ // FIXME: support using declarations!
+ QualType SubobjectType;
+ int SubobjectNumber = 0;
+ for (BasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end();
+ Path != PathEnd; ++Path) {
+ const BasePathElement &PathElement = Path->back();
+
+ // Determine whether we're looking at a distinct sub-object or not.
+ if (SubobjectType.isNull()) {
+      // This is the first subobject we've looked at. Record its type.
+ SubobjectType = Context.getCanonicalType(PathElement.Base->getType());
+ SubobjectNumber = PathElement.SubobjectNumber;
+ } else if (SubobjectType
+ != Context.getCanonicalType(PathElement.Base->getType())) {
+ // We found members of the given name in two subobjects of
+ // different types. This lookup is ambiguous.
+ BasePaths *PathsOnHeap = new BasePaths;
+ PathsOnHeap->swap(Paths);
+ return LookupResult::CreateLookupResult(Context, PathsOnHeap, true);
+ } else if (SubobjectNumber != PathElement.SubobjectNumber) {
+ // We have a different subobject of the same type.
+
+ // C++ [class.member.lookup]p5:
+ // A static member, a nested type or an enumerator defined in
+ // a base class T can unambiguously be found even if an object
+ // has more than one base class subobject of type T.
+ Decl *FirstDecl = *Path->Decls.first;
+ if (isa<VarDecl>(FirstDecl) ||
+ isa<TypeDecl>(FirstDecl) ||
+ isa<EnumConstantDecl>(FirstDecl))
+ continue;
+
+ if (isa<CXXMethodDecl>(FirstDecl)) {
+ // Determine whether all of the methods are static.
+ bool AllMethodsAreStatic = true;
+ for (DeclContext::lookup_iterator Func = Path->Decls.first;
+ Func != Path->Decls.second; ++Func) {
+ if (!isa<CXXMethodDecl>(*Func)) {
+ assert(isa<TagDecl>(*Func) && "Non-function must be a tag decl");
+ break;
+ }
+
+ if (!cast<CXXMethodDecl>(*Func)->isStatic()) {
+ AllMethodsAreStatic = false;
+ break;
+ }
+ }
+
+ if (AllMethodsAreStatic)
+ continue;
+ }
+
+ // We have found a nonstatic member name in multiple, distinct
+ // subobjects. Name lookup is ambiguous.
+ BasePaths *PathsOnHeap = new BasePaths;
+ PathsOnHeap->swap(Paths);
+ return LookupResult::CreateLookupResult(Context, PathsOnHeap, false);
+ }
+ }
+
+ // Lookup in a base class succeeded; return these results.
+
+ // If we found a function declaration, return an overload set.
+ if (isa<FunctionDecl>(*Paths.front().Decls.first))
+ return LookupResult::CreateLookupResult(Context,
+ Paths.front().Decls.first, Paths.front().Decls.second);
+
+ // We found a non-function declaration; return a single declaration.
+ return LookupResult::CreateLookupResult(Context, *Paths.front().Decls.first);
+}
+
+/// @brief Performs name lookup for a name that was parsed in the
+/// source code, and may contain a C++ scope specifier.
+///
+/// This routine is a convenience routine meant to be called from
+/// contexts that receive a name and an optional C++ scope specifier
+/// (e.g., "N::M::x"). It will then perform either qualified or
+/// unqualified name lookup (with LookupQualifiedName or LookupName,
+/// respectively) on the given name and return those results.
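+///
+/// For example (an illustrative sketch; N, M, x, and y are hypothetical names):
+/// @code
+/// namespace N { namespace M { int x; } }
+/// int y = N::M::x;  // scope specifier 'N::M::': qualified lookup into N::M
+/// int z = ::y;      // scope specifier '::': qualified lookup into the
+///                   // translation unit
+/// @endcode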
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin.
+///
+/// @param SS An optional C++ scope specifier, e.g., "::N::M".
+///
+/// @param Name The name of the entity that name lookup will
+/// search for.
+///
+/// @param Loc If provided, the source location where we're performing
+/// name lookup. At present, this is only used to produce diagnostics when
+/// C library functions (like "malloc") are implicitly declared.
+///
+/// @returns The result of qualified or unqualified name lookup.
+Sema::LookupResult
+Sema::LookupParsedName(Scope *S, const CXXScopeSpec *SS,
+ DeclarationName Name, LookupNameKind NameKind,
+ bool RedeclarationOnly, bool AllowBuiltinCreation,
+ SourceLocation Loc) {
+ if (SS && (SS->isSet() || SS->isInvalid())) {
+ // If the scope specifier is invalid, don't even look for
+ // anything.
+ if (SS->isInvalid())
+ return LookupResult::CreateLookupResult(Context, 0);
+
+ assert(!isUnknownSpecialization(*SS) && "Can't lookup dependent types");
+
+ if (isDependentScopeSpecifier(*SS)) {
+ // Determine whether we are looking into the current
+ // instantiation.
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS->getScopeRep());
+ CXXRecordDecl *Current = getCurrentInstantiationOf(NNS);
+ assert(Current && "Bad dependent scope specifier");
+
+      // The nested-name-specifier refers to the current instantiation,
+      // so we now look for a member of the current instantiation
+      // (C++0x [temp.dep.type]).
+ unsigned IDNS = getIdentifierNamespacesFromLookupNameKind(NameKind, true);
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I, E) = Current->lookup(Context, Name); I != E; ++I)
+ if (isAcceptableLookupResult(*I, NameKind, IDNS))
+ return LookupResult::CreateLookupResult(Context, I, E);
+ }
+
+ if (RequireCompleteDeclContext(*SS))
+ return LookupResult::CreateLookupResult(Context, 0);
+
+ return LookupQualifiedName(computeDeclContext(*SS),
+ Name, NameKind, RedeclarationOnly);
+ }
+
+ return LookupName(S, Name, NameKind, RedeclarationOnly,
+ AllowBuiltinCreation, Loc);
+}
+
+
+/// @brief Produce a diagnostic describing the ambiguity that resulted
+/// from name lookup.
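+///
+/// For example (an illustrative sketch; A, B, C, D, and x are hypothetical):
+/// @code
+/// struct A { int x; };
+/// struct B : A { };
+/// struct C : A { };
+/// struct D : B, C { };
+/// int get(D &d) { return d.x; }  // ambiguous: A::x occurs in two distinct
+///                                // A subobjects of D
+/// @endcode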
+///
+/// @param Result The ambiguous name lookup result.
+///
+/// @param Name The name of the entity that name lookup was
+/// searching for.
+///
+/// @param NameLoc The location of the name within the source code.
+///
+/// @param LookupRange A source range that provides more
+/// source-location information concerning the lookup itself. For
+/// example, this range might highlight a nested-name-specifier that
+/// precedes the name.
+///
+/// @returns true
+bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result, DeclarationName Name,
+ SourceLocation NameLoc,
+ SourceRange LookupRange) {
+ assert(Result.isAmbiguous() && "Lookup result must be ambiguous");
+
+ if (BasePaths *Paths = Result.getBasePaths()) {
+ if (Result.getKind() == LookupResult::AmbiguousBaseSubobjects) {
+ QualType SubobjectType = Paths->front().back().Base->getType();
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobjects)
+ << Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths)
+ << LookupRange;
+
+ DeclContext::lookup_iterator Found = Paths->front().Decls.first;
+ while (isa<CXXMethodDecl>(*Found) &&
+ cast<CXXMethodDecl>(*Found)->isStatic())
+ ++Found;
+
+ Diag((*Found)->getLocation(), diag::note_ambiguous_member_found);
+
+ Result.Destroy();
+ return true;
+ }
+
+ assert(Result.getKind() == LookupResult::AmbiguousBaseSubobjectTypes &&
+ "Unhandled form of name lookup ambiguity");
+
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobject_types)
+ << Name << LookupRange;
+
+ std::set<Decl *> DeclsPrinted;
+ for (BasePaths::paths_iterator Path = Paths->begin(), PathEnd = Paths->end();
+ Path != PathEnd; ++Path) {
+ Decl *D = *Path->Decls.first;
+ if (DeclsPrinted.insert(D).second)
+ Diag(D->getLocation(), diag::note_ambiguous_member_found);
+ }
+
+ Result.Destroy();
+ return true;
+ } else if (Result.getKind() == LookupResult::AmbiguousReference) {
+ Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange;
+
+ NamedDecl **DI = reinterpret_cast<NamedDecl **>(Result.First),
+ **DEnd = reinterpret_cast<NamedDecl **>(Result.Last);
+
+ for (; DI != DEnd; ++DI)
+ Diag((*DI)->getLocation(), diag::note_ambiguous_candidate) << *DI;
+
+ Result.Destroy();
+ return true;
+ }
+
+ assert(false && "Unhandled form of name lookup ambiguity");
+
+ // We can't reach here.
+ return true;
+}
+
+// \brief Add the associated classes and namespaces for
+// argument-dependent lookup with an argument of class type
+// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(CXXRecordDecl *Class,
+ ASTContext &Context,
+ Sema::AssociatedNamespaceSet &AssociatedNamespaces,
+ Sema::AssociatedClassSet &AssociatedClasses) {
+ // C++ [basic.lookup.koenig]p2:
+ // [...]
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
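+  //
+  // For example (illustrative; N, Base, and Derived are hypothetical names):
+  //   namespace N { struct Base {}; struct Derived : Base {}; }
+  // For an argument of type N::Derived, the associated classes are
+  // N::Derived and N::Base, and the associated namespace is N.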
+
+ // Add the class of which it is a member, if any.
+ DeclContext *Ctx = Class->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ AssociatedClasses.insert(EnclosingClass);
+
+ // Add the associated namespace for this class.
+ while (Ctx->isRecord())
+ Ctx = Ctx->getParent();
+ if (NamespaceDecl *EnclosingNamespace = dyn_cast<NamespaceDecl>(Ctx))
+ AssociatedNamespaces.insert(EnclosingNamespace);
+
+ // Add the class itself. If we've already seen this class, we don't
+ // need to visit base classes.
+ if (!AssociatedClasses.insert(Class))
+ return;
+
+ // FIXME: Handle class template specializations
+
+ // Add direct and indirect base classes along with their associated
+ // namespaces.
+ llvm::SmallVector<CXXRecordDecl *, 32> Bases;
+ Bases.push_back(Class);
+ while (!Bases.empty()) {
+ // Pop this class off the stack.
+ Class = Bases.back();
+ Bases.pop_back();
+
+ // Visit the base classes.
+ for (CXXRecordDecl::base_class_iterator Base = Class->bases_begin(),
+ BaseEnd = Class->bases_end();
+ Base != BaseEnd; ++Base) {
+ const RecordType *BaseType = Base->getType()->getAsRecordType();
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (AssociatedClasses.insert(BaseDecl)) {
+ // Find the associated namespace for this base class.
+ DeclContext *BaseCtx = BaseDecl->getDeclContext();
+ while (BaseCtx->isRecord())
+ BaseCtx = BaseCtx->getParent();
+ if (NamespaceDecl *EnclosingNamespace = dyn_cast<NamespaceDecl>(BaseCtx))
+ AssociatedNamespaces.insert(EnclosingNamespace);
+
+ // Make sure we visit the bases of this base class.
+ if (BaseDecl->bases_begin() != BaseDecl->bases_end())
+ Bases.push_back(BaseDecl);
+ }
+ }
+ }
+}
+
+// \brief Add the associated classes and namespaces for
+// argument-dependent lookup with an argument of type T
+// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(QualType T,
+ ASTContext &Context,
+ Sema::AssociatedNamespaceSet &AssociatedNamespaces,
+ Sema::AssociatedClassSet &AssociatedClasses) {
+ // C++ [basic.lookup.koenig]p2:
+ //
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument). Typedef names and using-declarations used to specify
+ // the types do not contribute to this set. The sets of namespaces
+ // and classes are determined in the following way:
+ T = Context.getCanonicalType(T).getUnqualifiedType();
+
+ // -- If T is a pointer to U or an array of U, its associated
+ // namespaces and classes are those associated with U.
+ //
+ // We handle this by unwrapping pointer and array types immediately,
+ // to avoid unnecessary recursion.
+ while (true) {
+ if (const PointerType *Ptr = T->getAsPointerType())
+ T = Ptr->getPointeeType();
+ else if (const ArrayType *Ptr = Context.getAsArrayType(T))
+ T = Ptr->getElementType();
+ else
+ break;
+ }
+
+ // -- If T is a fundamental type, its associated sets of
+ // namespaces and classes are both empty.
+ if (T->getAsBuiltinType())
+ return;
+
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
+ if (const RecordType *ClassType = T->getAsRecordType())
+ if (CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(ClassType->getDecl())) {
+ addAssociatedClassesAndNamespaces(ClassDecl, Context,
+ AssociatedNamespaces,
+ AssociatedClasses);
+ return;
+ }
+
+ // -- If T is an enumeration type, its associated namespace is
+ // the namespace in which it is defined. If it is class
+ // member, its associated class is the member’s class; else
+ // it has no associated class.
+ if (const EnumType *EnumT = T->getAsEnumType()) {
+ EnumDecl *Enum = EnumT->getDecl();
+
+ DeclContext *Ctx = Enum->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ AssociatedClasses.insert(EnclosingClass);
+
+ // Add the associated namespace for this class.
+ while (Ctx->isRecord())
+ Ctx = Ctx->getParent();
+ if (NamespaceDecl *EnclosingNamespace = dyn_cast<NamespaceDecl>(Ctx))
+ AssociatedNamespaces.insert(EnclosingNamespace);
+
+ return;
+ }
+
+ // -- If T is a function type, its associated namespaces and
+ // classes are those associated with the function parameter
+ // types and those associated with the return type.
+ if (const FunctionType *FunctionType = T->getAsFunctionType()) {
+ // Return type
+ addAssociatedClassesAndNamespaces(FunctionType->getResultType(),
+ Context,
+ AssociatedNamespaces, AssociatedClasses);
+
+ const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FunctionType);
+ if (!Proto)
+ return;
+
+ // Argument types
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ addAssociatedClassesAndNamespaces(*Arg, Context,
+ AssociatedNamespaces, AssociatedClasses);
+
+ return;
+ }
+
+ // -- If T is a pointer to a member function of a class X, its
+ // associated namespaces and classes are those associated
+ // with the function parameter types and return type,
+ // together with those associated with X.
+ //
+ // -- If T is a pointer to a data member of class X, its
+ // associated namespaces and classes are those associated
+ // with the member type together with those associated with
+ // X.
+ if (const MemberPointerType *MemberPtr = T->getAsMemberPointerType()) {
+ // Handle the type that the pointer to member points to.
+ addAssociatedClassesAndNamespaces(MemberPtr->getPointeeType(),
+ Context,
+ AssociatedNamespaces, AssociatedClasses);
+
+ // Handle the class type into which this points.
+ if (const RecordType *Class = MemberPtr->getClass()->getAsRecordType())
+ addAssociatedClassesAndNamespaces(cast<CXXRecordDecl>(Class->getDecl()),
+ Context,
+ AssociatedNamespaces, AssociatedClasses);
+
+ return;
+ }
+
+ // FIXME: What about block pointers?
+ // FIXME: What about Objective-C message sends?
+}
+
+/// \brief Find the associated classes and namespaces for
+/// argument-dependent lookup for a call with the given set of
+/// arguments.
+///
+/// This routine computes the sets of associated classes and associated
+/// namespaces searched by argument-dependent lookup
+/// (C++ [basic.lookup.argdep]) for a given set of arguments.
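+///
+/// For example (an illustrative sketch; N, S, swap, and g are hypothetical names):
+/// @code
+/// namespace N { struct S {}; void swap(S &, S &); }
+/// void g(N::S &a, N::S &b) { swap(a, b); }  // N is an associated namespace,
+///                                           // so N::swap is found via ADL
+/// @endcode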
+void
+Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs,
+ AssociatedNamespaceSet &AssociatedNamespaces,
+ AssociatedClassSet &AssociatedClasses) {
+ AssociatedNamespaces.clear();
+ AssociatedClasses.clear();
+
+ // C++ [basic.lookup.koenig]p2:
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument).
+ for (unsigned ArgIdx = 0; ArgIdx != NumArgs; ++ArgIdx) {
+ Expr *Arg = Args[ArgIdx];
+
+ if (Arg->getType() != Context.OverloadTy) {
+ addAssociatedClassesAndNamespaces(Arg->getType(), Context,
+ AssociatedNamespaces, AssociatedClasses);
+ continue;
+ }
+
+ // [...] In addition, if the argument is the name or address of a
+ // set of overloaded functions and/or function templates, its
+ // associated classes and namespaces are the union of those
+ // associated with each of the members of the set: the namespace
+ // in which the function or function template is defined and the
+ // classes and namespaces associated with its (non-dependent)
+ // parameter types and return type.
+ DeclRefExpr *DRE = 0;
+ if (UnaryOperator *unaryOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (unaryOp->getOpcode() == UnaryOperator::AddrOf)
+ DRE = dyn_cast<DeclRefExpr>(unaryOp->getSubExpr());
+ } else
+ DRE = dyn_cast<DeclRefExpr>(Arg);
+ if (!DRE)
+ continue;
+
+ OverloadedFunctionDecl *Ovl
+ = dyn_cast<OverloadedFunctionDecl>(DRE->getDecl());
+ if (!Ovl)
+ continue;
+
+ for (OverloadedFunctionDecl::function_iterator Func = Ovl->function_begin(),
+ FuncEnd = Ovl->function_end();
+ Func != FuncEnd; ++Func) {
+ FunctionDecl *FDecl = cast<FunctionDecl>(*Func);
+
+ // Add the namespace in which this function was defined. Note
+ // that, if this is a member function, we do *not* consider the
+ // enclosing namespace of its class.
+ DeclContext *Ctx = FDecl->getDeclContext();
+ if (NamespaceDecl *EnclosingNamespace = dyn_cast<NamespaceDecl>(Ctx))
+ AssociatedNamespaces.insert(EnclosingNamespace);
+
+ // Add the classes and namespaces associated with the parameter
+ // types and return type of this function.
+ addAssociatedClassesAndNamespaces(FDecl->getType(), Context,
+ AssociatedNamespaces, AssociatedClasses);
+ }
+ }
+}
+
+/// IsAcceptableNonMemberOperatorCandidate - Determine whether Fn is
+/// an acceptable non-member overloaded operator for a call whose
+/// arguments have types T1 (and, if non-empty, T2). This routine
+/// implements the check in C++ [over.match.oper]p3b2 concerning
+/// enumeration types.
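+///
+/// For example (an illustrative sketch; Color and its operator< are hypothetical):
+/// @code
+/// enum Color { Red, Green };
+/// bool operator<(Color, Color);
+/// bool b = Red < Green;  // acceptable: the first parameter has the
+///                        // enumeration type Color
+/// @endcode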
+static bool
+IsAcceptableNonMemberOperatorCandidate(FunctionDecl *Fn,
+ QualType T1, QualType T2,
+ ASTContext &Context) {
+ if (T1->isDependentType() || (!T2.isNull() && T2->isDependentType()))
+ return true;
+
+ if (T1->isRecordType() || (!T2.isNull() && T2->isRecordType()))
+ return true;
+
+ const FunctionProtoType *Proto = Fn->getType()->getAsFunctionProtoType();
+ if (Proto->getNumArgs() < 1)
+ return false;
+
+ if (T1->isEnumeralType()) {
+ QualType ArgType = Proto->getArgType(0).getNonReferenceType();
+ if (Context.getCanonicalType(T1).getUnqualifiedType()
+ == Context.getCanonicalType(ArgType).getUnqualifiedType())
+ return true;
+ }
+
+ if (Proto->getNumArgs() < 2)
+ return false;
+
+ if (!T2.isNull() && T2->isEnumeralType()) {
+ QualType ArgType = Proto->getArgType(1).getNonReferenceType();
+ if (Context.getCanonicalType(T2).getUnqualifiedType()
+ == Context.getCanonicalType(ArgType).getUnqualifiedType())
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Find the protocol with the given name, if any.
+ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II) {
+ Decl *D = LookupName(TUScope, II, LookupObjCProtocolName).getAsDecl();
+ return cast_or_null<ObjCProtocolDecl>(D);
+}
+
+/// \brief Find the Objective-C implementation with the given name, if
+/// any.
+ObjCImplementationDecl *Sema::LookupObjCImplementation(IdentifierInfo *II) {
+ Decl *D = LookupName(TUScope, II, LookupObjCImplementationName).getAsDecl();
+ return cast_or_null<ObjCImplementationDecl>(D);
+}
+
+/// \brief Find the Objective-C category implementation with the given
+/// name, if any.
+ObjCCategoryImplDecl *Sema::LookupObjCCategoryImpl(IdentifierInfo *II) {
+ Decl *D = LookupName(TUScope, II, LookupObjCCategoryImplName).getAsDecl();
+ return cast_or_null<ObjCCategoryImplDecl>(D);
+}
+
+void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
+ QualType T1, QualType T2,
+ FunctionSet &Functions) {
+ // C++ [over.match.oper]p3:
+ // -- The set of non-member candidates is the result of the
+ // unqualified lookup of operator@ in the context of the
+ // expression according to the usual rules for name lookup in
+ // unqualified function calls (3.4.2) except that all member
+ // functions are ignored. However, if no operand has a class
+ // type, only those non-member functions in the lookup set
+ // that have a first parameter of type T1 or “reference to
+ // (possibly cv-qualified) T1”, when T1 is an enumeration
+ // type, or (if there is a right operand) a second parameter
+ // of type T2 or “reference to (possibly cv-qualified) T2”,
+ // when T2 is an enumeration type, are candidate functions.
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ LookupResult Operators = LookupName(S, OpName, LookupOperatorName);
+
+ assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
+
+ if (!Operators)
+ return;
+
+ for (LookupResult::iterator Op = Operators.begin(), OpEnd = Operators.end();
+ Op != OpEnd; ++Op) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*Op))
+ if (IsAcceptableNonMemberOperatorCandidate(FD, T1, T2, Context))
+ Functions.insert(FD); // FIXME: canonical FD
+ }
+}
+
+void Sema::ArgumentDependentLookup(DeclarationName Name,
+ Expr **Args, unsigned NumArgs,
+ FunctionSet &Functions) {
+ // Find all of the associated namespaces and classes based on the
+ // arguments we have.
+ AssociatedNamespaceSet AssociatedNamespaces;
+ AssociatedClassSet AssociatedClasses;
+ FindAssociatedClassesAndNamespaces(Args, NumArgs,
+ AssociatedNamespaces, AssociatedClasses);
+
+ // C++ [basic.lookup.argdep]p3:
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains [...] then Y is
+ // empty. Otherwise Y is the set of declarations found in the
+ // namespaces associated with the argument types as described
+ // below. The set of declarations found by the lookup of the name
+ // is the union of X and Y.
+ //
+ // Here, we compute Y and add its members to the overloaded
+ // candidate set.
+ for (AssociatedNamespaceSet::iterator NS = AssociatedNamespaces.begin(),
+ NSEnd = AssociatedNamespaces.end();
+ NS != NSEnd; ++NS) {
+ // When considering an associated namespace, the lookup is the
+ // same as the lookup performed when the associated namespace is
+ // used as a qualifier (3.4.3.2) except that:
+ //
+ // -- Any using-directives in the associated namespace are
+ // ignored.
+ //
+ // -- FIXME: Any namespace-scope friend functions declared in
+ // associated classes are visible within their respective
+ // namespaces even if they are not visible during an ordinary
+ // lookup (11.4).
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I, E) = (*NS)->lookup(Context, Name); I != E; ++I) {
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(*I);
+ if (!Func)
+ break;
+
+ Functions.insert(Func);
+ }
+ }
+}
diff --git a/lib/Sema/SemaNamedCast.cpp b/lib/Sema/SemaNamedCast.cpp
new file mode 100644
index 0000000..daf6800
--- /dev/null
+++ b/lib/Sema/SemaNamedCast.cpp
@@ -0,0 +1,932 @@
+//===--- SemaNamedCast.cpp - Semantic Analysis for Named Casts ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ named casts.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "SemaInherit.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include <set>
+using namespace clang;
+
+enum TryStaticCastResult {
+ TSC_NotApplicable, ///< The cast method is not applicable.
+ TSC_Success, ///< The cast method is appropriate and successful.
+ TSC_Failed ///< The cast method is appropriate, but failed. A
+ ///< diagnostic has been emitted.
+};
+
+static void CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange,
+ const SourceRange &DestRange);
+static void CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange,
+ const SourceRange &DestRange);
+static void CheckStaticCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange);
+static void CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange,
+ const SourceRange &DestRange);
+
+static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType);
+static TryStaticCastResult TryLValueToRValueCast(
+ Sema &Self, Expr *SrcExpr, QualType DestType, const SourceRange &OpRange);
+static TryStaticCastResult TryStaticReferenceDowncast(
+ Sema &Self, Expr *SrcExpr, QualType DestType, const SourceRange &OpRange);
+static TryStaticCastResult TryStaticPointerDowncast(
+ Sema &Self, QualType SrcType, QualType DestType, const SourceRange &OpRange);
+static TryStaticCastResult TryStaticMemberPointerUpcast(
+ Sema &Self, QualType SrcType, QualType DestType, const SourceRange &OpRange);
+static TryStaticCastResult TryStaticDowncast(Sema &Self, QualType SrcType,
+ QualType DestType,
+ const SourceRange &OpRange,
+ QualType OrigSrcType,
+ QualType OrigDestType);
+static TryStaticCastResult TryStaticImplicitCast(Sema &Self, Expr *SrcExpr,
+ QualType DestType,
+ const SourceRange &OpRange);
+
+/// ActOnCXXNamedCast - Handle a parsed {dynamic,static,reinterpret,const}_cast.
+Action::OwningExprResult
+Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc, TypeTy *Ty,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc, ExprArg E,
+ SourceLocation RParenLoc) {
+ Expr *Ex = E.takeAs<Expr>();
+ QualType DestType = QualType::getFromOpaquePtr(Ty);
+ SourceRange OpRange(OpLoc, RParenLoc);
+ SourceRange DestRange(LAngleBracketLoc, RAngleBracketLoc);
+
+ // If the type is dependent, we won't do the semantic analysis now.
+ // FIXME: should we check this in a more fine-grained manner?
+ bool TypeDependent = DestType->isDependentType() || Ex->isTypeDependent();
+
+ switch (Kind) {
+ default: assert(0 && "Unknown C++ cast!");
+
+ case tok::kw_const_cast:
+ if (!TypeDependent)
+ CheckConstCast(*this, Ex, DestType, OpRange, DestRange);
+ return Owned(new (Context) CXXConstCastExpr(DestType.getNonReferenceType(),
+ Ex, DestType, OpLoc));
+
+ case tok::kw_dynamic_cast:
+ if (!TypeDependent)
+ CheckDynamicCast(*this, Ex, DestType, OpRange, DestRange);
+ return Owned(new (Context)CXXDynamicCastExpr(DestType.getNonReferenceType(),
+ Ex, DestType, OpLoc));
+
+ case tok::kw_reinterpret_cast:
+ if (!TypeDependent)
+ CheckReinterpretCast(*this, Ex, DestType, OpRange, DestRange);
+ return Owned(new (Context) CXXReinterpretCastExpr(
+ DestType.getNonReferenceType(),
+ Ex, DestType, OpLoc));
+
+ case tok::kw_static_cast:
+ if (!TypeDependent)
+ CheckStaticCast(*this, Ex, DestType, OpRange);
+ return Owned(new (Context) CXXStaticCastExpr(DestType.getNonReferenceType(),
+ Ex, DestType, OpLoc));
+ }
+
+ return ExprError();
+}
+
+/// CheckConstCast - Check that a const_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.11 for details. const_cast is typically used in code
+/// like this:
+/// const char *str = "literal";
+/// legacy_function(const_cast\<char*\>(str));
+void
+CheckConstCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange, const SourceRange &DestRange)
+{
+ QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();
+
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr->getType();
+ if (const LValueReferenceType *DestTypeTmp =
+ DestType->getAsLValueReferenceType()) {
+ if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ // Cannot cast non-lvalue to lvalue reference type.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
+ << "const_cast" << OrigDestType << SrcExpr->getSourceRange();
+ return;
+ }
+
+ // C++ 5.2.11p4: An lvalue of type T1 can be [cast] to an lvalue of type T2
+ // [...] if a pointer to T1 can be [cast] to the type pointer to T2.
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+ } else {
+ // C++ 5.2.11p1: Otherwise, the result is an rvalue and the
+ // lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard
+ // conversions are performed on the expression.
+ Self.DefaultFunctionArrayConversion(SrcExpr);
+ SrcType = SrcExpr->getType();
+ }
+
+ // C++ 5.2.11p5: For a const_cast involving pointers to data members [...]
+ // the rules for const_cast are the same as those used for pointers.
+
+ if (!DestType->isPointerType() && !DestType->isMemberPointerType()) {
+ // Cannot cast to non-pointer, non-reference type. Note that, if DestType
+ // was a reference type, we converted it to a pointer above.
+ // The status of rvalue references isn't entirely clear, but it looks like
+ // conversion to them is simply invalid.
+ // C++ 5.2.11p3: For two pointer types [...]
+ Self.Diag(OpRange.getBegin(), diag::err_bad_const_cast_dest)
+ << OrigDestType << DestRange;
+ return;
+ }
+ if (DestType->isFunctionPointerType() ||
+ DestType->isMemberFunctionPointerType()) {
+ // Cannot cast direct function pointers.
+ // C++ 5.2.11p2: [...] where T is any object type or the void type [...]
+ // T is the ultimate pointee of source and target type.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_const_cast_dest)
+ << OrigDestType << DestRange;
+ return;
+ }
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
+ // completely equal.
+ // FIXME: const_cast should probably not be able to convert between pointers
+ // to different address spaces.
+ // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
+ // in multi-level pointers may change, but the level count must be the same,
+ // as must be the final pointee type.
+ while (SrcType != DestType &&
+ Self.UnwrapSimilarPointerTypes(SrcType, DestType)) {
+ SrcType = SrcType.getUnqualifiedType();
+ DestType = DestType.getUnqualifiedType();
+ }
+
+ // Doug Gregor said to disallow this until users complain.
+#if 0
+ // If we end up with constant arrays of equal size, unwrap those too. A cast
+ // from const int [N] to int (&)[N] is invalid by my reading of the
+ // standard, but g++ accepts it even with -ansi -pedantic.
+ // No more than one level, though, so don't embed this in the unwrap loop
+ // above.
+ const ConstantArrayType *SrcTypeArr, *DestTypeArr;
+ if ((SrcTypeArr = Self.Context.getAsConstantArrayType(SrcType)) &&
+ (DestTypeArr = Self.Context.getAsConstantArrayType(DestType)))
+ {
+ if (SrcTypeArr->getSize() != DestTypeArr->getSize()) {
+ // Different array sizes.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "const_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+ SrcType = SrcTypeArr->getElementType().getUnqualifiedType();
+ DestType = DestTypeArr->getElementType().getUnqualifiedType();
+ }
+#endif
+
+ // Since we're dealing in canonical types, the remainder must be the same.
+ if (SrcType != DestType) {
+ // Cast between unrelated types.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "const_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+}
+
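+// For illustration, a sketch of what the pointer-unwrapping check above
+// accepts and rejects (hypothetical variables, not from the code):
+//
+//   const int * const * p = 0;
+//   int **q = const_cast<int**>(p);    // OK: same pointer depth and final
+//                                      //     pointee; only cv changes
+//   long **r = const_cast<long**>(p);  // error: final pointee types differ
+//   int *s = const_cast<int*>(p);      // error: pointer depths differ
+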
+/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
+/// valid.
+/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
+/// like this:
+/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
+void
+CheckReinterpretCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange, const SourceRange &DestRange)
+{
+ QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();
+
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr->getType();
+ if (const LValueReferenceType *DestTypeTmp =
+ DestType->getAsLValueReferenceType()) {
+ if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ // Cannot cast non-lvalue to reference type.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
+ << "reinterpret_cast" << OrigDestType << SrcExpr->getSourceRange();
+ return;
+ }
+
+ // C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
+ // same effect as the conversion *reinterpret_cast<T*>(&x) with the
+ // built-in & and * operators.
+ // This code does this transformation for the checked types.
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+ } else if (const RValueReferenceType *DestTypeTmp =
+ DestType->getAsRValueReferenceType()) {
+ // Both the reference conversion and the rvalue rules apply.
+ Self.DefaultFunctionArrayConversion(SrcExpr);
+ SrcType = SrcExpr->getType();
+
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+ } else {
+ // C++ 5.2.10p1: [...] the lvalue-to-rvalue, array-to-pointer, and
+ // function-to-pointer standard conversions are performed on the
+ // expression v.
+ Self.DefaultFunctionArrayConversion(SrcExpr);
+ SrcType = SrcExpr->getType();
+ }
+
+ // Canonicalize source for comparison.
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ const MemberPointerType *DestMemPtr = DestType->getAsMemberPointerType(),
+ *SrcMemPtr = SrcType->getAsMemberPointerType();
+ if (DestMemPtr && SrcMemPtr) {
+ // C++ 5.2.10p9: An rvalue of type "pointer to member of X of type T1"
+ // can be explicitly converted to an rvalue of type "pointer to member
+ // of Y of type T2" if T1 and T2 are both function types or both object
+ // types.
+ if (DestMemPtr->getPointeeType()->isFunctionType() !=
+ SrcMemPtr->getPointeeType()->isFunctionType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
+ // constness.
+ if (CastsAwayConstness(Self, SrcType, DestType)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
+ << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ // A valid member pointer cast.
+ return;
+ }
+
+ // See below for the enumeral issue.
+ if (SrcType->isNullPtrType() && DestType->isIntegralType() &&
+ !DestType->isEnumeralType()) {
+ // C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it. A value of std::nullptr_t can be
+ // converted to an integral type; the conversion has the same meaning
+ // and validity as a conversion of (void*)0 to the integral type.
+ if (Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_small_int)
+ << OrigDestType << DestRange;
+ }
+ return;
+ }
+
+ bool destIsPtr = DestType->isPointerType();
+ bool srcIsPtr = SrcType->isPointerType();
+ if (!destIsPtr && !srcIsPtr) {
+ // Except for std::nullptr_t->integer and lvalue->reference, which are
+ // handled above, at least one of the two arguments must be a pointer.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ if (SrcType == DestType) {
+ // C++ 5.2.10p2 has a note that mentions that, subject to all other
+ // restrictions, a cast to the same type is allowed. The intent is not
+ // entirely clear here, since all other paragraphs explicitly forbid casts
+ // to the same type. However, the behavior of compilers is pretty consistent
+ // on this point: allow same-type conversion if the involved types are
+ // pointers, disallow otherwise.
+ return;
+ }
+
+ // Note: Clang treats enumeration types as integral types. If this is ever
+ // changed for C++, the additional check here will be redundant.
+ if (DestType->isIntegralType() && !DestType->isEnumeralType()) {
+ assert(srcIsPtr && "One type must be a pointer");
+ // C++ 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it.
+ if (Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_small_int)
+ << OrigDestType << DestRange;
+ }
+ return;
+ }
+
+ if (SrcType->isIntegralType() || SrcType->isEnumeralType()) {
+ assert(destIsPtr && "One type must be a pointer");
+ // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
+ // converted to a pointer.
+ return;
+ }
+
+ if (!destIsPtr || !srcIsPtr) {
+ // With the valid non-pointer conversions out of the way, we can be even
+ // more stringent.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
+ if (CastsAwayConstness(Self, SrcType, DestType)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
+ << "reinterpret_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ // Not casting away constness, so the only remaining check is for compatible
+ // pointer categories.
+
+ if (SrcType->isFunctionPointerType()) {
+ if (DestType->isFunctionPointerType()) {
+ // C++ 5.2.10p6: A pointer to a function can be explicitly converted to
+ // a pointer to a function of a different type.
+ return;
+ }
+
+ // C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
+ // an object type or vice versa is conditionally-supported.
+ // Compilers support it in C++03 too, though, because it's necessary for
+ // casting the return value of dlsym() and GetProcAddress().
+ // FIXME: Conditionally-supported behavior should be configurable in the
+ // TargetInfo or similar.
+ if (!Self.getLangOptions().CPlusPlus0x) {
+ Self.Diag(OpRange.getBegin(), diag::ext_reinterpret_cast_fn_obj)
+ << OpRange;
+ }
+ return;
+ }
+
+ if (DestType->isFunctionPointerType()) {
+ // See above.
+ if (!Self.getLangOptions().CPlusPlus0x) {
+ Self.Diag(OpRange.getBegin(), diag::ext_reinterpret_cast_fn_obj)
+ << OpRange;
+ }
+ return;
+ }
+
+ // C++ 5.2.10p7: A pointer to an object can be explicitly converted to
+ // a pointer to an object of different type.
+ // Void pointers are not specified, but supported by every compiler out there.
+ // So we finish by allowing everything that remains - it's got to be two
+ // object pointers.
+}
+
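+// For illustration, a sketch of the main paths through the check above
+// (hypothetical variables):
+//
+//   int i;
+//   int *p = &i;
+//   char c = reinterpret_cast<char>(p);   // error: char is too small to hold
+//                                         //        a pointer value
+//   long l = reinterpret_cast<long>(p);   // OK where long can hold a pointer
+//   void (*fp)() = reinterpret_cast<void(*)()>(p); // extension warning before
+//                                                  // C++0x: object pointer to
+//                                                  // function pointer
+//   const int *cp = p;
+//   int *q = reinterpret_cast<int*>(cp);  // error: casts away constness
+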
+/// CastsAwayConstness - Check if the pointer conversion from SrcType to
+/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
+/// the cast checkers. Both arguments must denote pointer (possibly to member)
+/// types.
+bool
+CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType)
+{
+ // Casting away constness is defined in C++ 5.2.11p8 with reference to
+ // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
+ // the rules are non-trivial. So first we construct Tcv *...cv* as described
+ // in C++ 5.2.11p8.
+ assert((SrcType->isPointerType() || SrcType->isMemberPointerType()) &&
+ "Source type is not pointer or pointer to member.");
+ assert((DestType->isPointerType() || DestType->isMemberPointerType()) &&
+ "Destination type is not pointer or pointer to member.");
+
+ QualType UnwrappedSrcType = SrcType, UnwrappedDestType = DestType;
+ llvm::SmallVector<unsigned, 8> cv1, cv2;
+
+ // Find the qualifications.
+ while (Self.UnwrapSimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
+ cv1.push_back(UnwrappedSrcType.getCVRQualifiers());
+ cv2.push_back(UnwrappedDestType.getCVRQualifiers());
+ }
+ assert(cv1.size() > 0 && "Must have at least one pointer level.");
+
+ // Construct void pointers with those qualifiers (in reverse order of
+ // unwrapping, of course).
+ QualType SrcConstruct = Self.Context.VoidTy;
+ QualType DestConstruct = Self.Context.VoidTy;
+ for (llvm::SmallVector<unsigned, 8>::reverse_iterator i1 = cv1.rbegin(),
+ i2 = cv2.rbegin();
+ i1 != cv1.rend(); ++i1, ++i2)
+ {
+ SrcConstruct = Self.Context.getPointerType(
+ SrcConstruct.getQualifiedType(*i1));
+ DestConstruct = Self.Context.getPointerType(
+ DestConstruct.getQualifiedType(*i2));
+ }
+
+ // Test if they're compatible.
+ return SrcConstruct != DestConstruct &&
+ !Self.IsQualificationConversion(SrcConstruct, DestConstruct);
+}
+
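+// For illustration, the kind of multi-level comparison the routine above
+// performs (hypothetical types):
+//
+//   const int *    ->  int *                casts away constness (true)
+//   const int **   ->  int **               casts away constness (true)
+//   int **         ->  const int * const *  qualification conversion only,
+//                                           so the result is false
+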
+/// CheckStaticCast - Check that a static_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
+/// implicit conversions explicit and getting rid of data loss warnings.
+void
+CheckStaticCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange)
+{
+  // The order of the tests is not entirely arbitrary. There is one conversion
+ // that can be handled in two different ways. Given:
+ // struct A {};
+ // struct B : public A {
+ // B(); B(const A&);
+ // };
+ // const A &a = B();
+ // the cast static_cast<const B&>(a) could be seen as either a static
+ // reference downcast, or an explicit invocation of the user-defined
+ // conversion using B's conversion constructor.
+ // DR 427 specifies that the downcast is to be applied here.
+
+ // FIXME: With N2812, casts to rvalue refs will change.
+
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ if (DestType->isVoidType()) {
+ return;
+ }
+
+ // C++ 5.2.9p5, reference downcast.
+ // See the function for details.
+ // DR 427 specifies that this is to be applied before paragraph 2.
+ if (TryStaticReferenceDowncast(Self, SrcExpr, DestType, OpRange)
+ > TSC_NotApplicable) {
+ return;
+ }
+
+ // N2844 5.2.9p3: An lvalue of type "cv1 T1" can be cast to type "rvalue
+ // reference to cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ if (TryLValueToRValueCast(Self, SrcExpr, DestType, OpRange) >
+ TSC_NotApplicable) {
+ return;
+ }
+
+ // C++ 5.2.9p2: An expression e can be explicitly converted to a type T
+ // [...] if the declaration "T t(e);" is well-formed, [...].
+ if (TryStaticImplicitCast(Self, SrcExpr, DestType, OpRange) >
+ TSC_NotApplicable) {
+ return;
+ }
+
+ // C++ 5.2.9p6: May apply the reverse of any standard conversion, except
+ // lvalue-to-rvalue, array-to-pointer, function-to-pointer, and boolean
+ // conversions, subject to further restrictions.
+ // Also, C++ 5.2.9p1 forbids casting away constness, which makes reversal
+ // of qualification conversions impossible.
+
+ // The lvalue-to-rvalue, array-to-pointer and function-to-pointer conversions
+ // are applied to the expression.
+ QualType OrigSrcType = SrcExpr->getType();
+ Self.DefaultFunctionArrayConversion(SrcExpr);
+
+ QualType SrcType = Self.Context.getCanonicalType(SrcExpr->getType());
+
+ // Reverse integral promotion/conversion. All such conversions are themselves
+ // again integral promotions or conversions and are thus already handled by
+  // p2 (TryStaticImplicitCast above).
+ // (Note: any data loss warnings should be suppressed.)
+ // The exception is the reverse of enum->integer, i.e. integer->enum (and
+ // enum->enum). See also C++ 5.2.9p7.
+ // The same goes for reverse floating point promotion/conversion and
+ // floating-integral conversions. Again, only floating->enum is relevant.
+ if (DestType->isEnumeralType()) {
+ if (SrcType->isComplexType() || SrcType->isVectorType()) {
+ // Fall through - these cannot be converted.
+ } else if (SrcType->isArithmeticType() || SrcType->isEnumeralType()) {
+ return;
+ }
+ }
+
+ // Reverse pointer upcast. C++ 4.10p3 specifies pointer upcast.
+ // C++ 5.2.9p8 additionally disallows a cast path through virtual inheritance.
+ if (TryStaticPointerDowncast(Self, SrcType, DestType, OpRange)
+ > TSC_NotApplicable) {
+ return;
+ }
+
+ // Reverse member pointer conversion. C++ 4.11 specifies member pointer
+ // conversion. C++ 5.2.9p9 has additional information.
+ // DR54's access restrictions apply here also.
+ if (TryStaticMemberPointerUpcast(Self, SrcType, DestType, OpRange)
+ > TSC_NotApplicable) {
+ return;
+ }
+
+ // Reverse pointer conversion to void*. C++ 4.10.p2 specifies conversion to
+ // void*. C++ 5.2.9p10 specifies additional restrictions, which really is
+ // just the usual constness stuff.
+ if (const PointerType *SrcPointer = SrcType->getAsPointerType()) {
+ QualType SrcPointee = SrcPointer->getPointeeType();
+ if (SrcPointee->isVoidType()) {
+ if (const PointerType *DestPointer = DestType->getAsPointerType()) {
+ QualType DestPointee = DestPointer->getPointeeType();
+ if (DestPointee->isIncompleteOrObjectType()) {
+ // This is definitely the intended conversion, but it might fail due
+ // to a const violation.
+ if (!DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
+ << "static_cast" << DestType << OrigSrcType << OpRange;
+ }
+ return;
+ }
+ }
+ }
+ }
+
+ // We tried everything. Everything! Nothing works! :-(
+ // FIXME: Error reporting could be a lot better. Should store the reason why
+ // every substep failed and, at the end, select the most specific and report
+ // that.
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_generic)
+ << "static_cast" << DestType << OrigSrcType
+ << OpRange;
+}
+
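+// For illustration, a sketch of the cast-from-void* path above (hypothetical
+// variables):
+//
+//   void *vp = 0;
+//   const void *cvp = 0;
+//   int *p = static_cast<int*>(vp);             // OK: reverse of the standard
+//                                               //     T* -> void* conversion
+//   int *q = static_cast<int*>(cvp);            // error: would cast away
+//                                               //        constness
+//   const int *r = static_cast<const int*>(cvp); // OK: qualifiers preserved
+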
+/// Tests whether a conversion according to N2844 is valid.
+TryStaticCastResult
+TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ const SourceRange &OpRange)
+{
+ // N2844 5.2.9p3: An lvalue of type "cv1 T1" can be cast to type "rvalue
+ // reference to cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ const RValueReferenceType *R = DestType->getAsRValueReferenceType();
+ if (!R)
+ return TSC_NotApplicable;
+
+ if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid)
+ return TSC_NotApplicable;
+
+ // Because we try the reference downcast before this function, from now on
+ // this is the only cast possibility, so we issue an error if we fail now.
+ bool DerivedToBase;
+ if (Self.CompareReferenceRelationship(SrcExpr->getType(), R->getPointeeType(),
+ DerivedToBase) <
+ Sema::Ref_Compatible_With_Added_Qualification) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_lvalue_to_rvalue_cast)
+ << SrcExpr->getType() << R->getPointeeType() << OpRange;
+ return TSC_Failed;
+ }
+
+ // FIXME: Similar to CheckReferenceInit, we actually need more AST annotation
+ // than nothing.
+ return TSC_Success;
+}
+
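+// For illustration, the N2844 rule above in a hypothetical snippet:
+//
+//   struct T {};
+//   T t;
+//   static_cast<T&&>(t);     // OK: T is reference-compatible with T
+//   static_cast<int&&>(t);   // rejected here: int is not reference-compatible
+//                            // with T
+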
+/// Tests whether a conversion according to C++ 5.2.9p5 is valid.
+TryStaticCastResult
+TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ const SourceRange &OpRange)
+{
+ // C++ 5.2.9p5: An lvalue of type "cv1 B", where B is a class type, can be
+ // cast to type "reference to cv2 D", where D is a class derived from B,
+ // if a valid standard conversion from "pointer to D" to "pointer to B"
+ // exists, cv2 >= cv1, and B is not a virtual base class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context. Although the wording of DR54 only applies to the pointer
+  // variant of this rule, the intent is clearly for it to apply to this
+ // conversion as well.
+
+ if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ return TSC_NotApplicable;
+ }
+
+ const ReferenceType *DestReference = DestType->getAsReferenceType();
+ if (!DestReference) {
+ return TSC_NotApplicable;
+ }
+ QualType DestPointee = DestReference->getPointeeType();
+
+ return TryStaticDowncast(Self, SrcExpr->getType(), DestPointee, OpRange,
+ SrcExpr->getType(), DestType);
+}
+
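+// For illustration, the reference downcast handled above (hypothetical types):
+//
+//   struct Base {};
+//   struct Derived : Base {};
+//   Derived d;
+//   Base &br = d;
+//   Derived &dr = static_cast<Derived&>(br);   // accepted: lvalue of Base
+//                                              // cast to reference to Derived
+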
+/// Tests whether a conversion according to C++ 5.2.9p8 is valid.
+TryStaticCastResult
+TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
+ const SourceRange &OpRange)
+{
+ // C++ 5.2.9p8: An rvalue of type "pointer to cv1 B", where B is a class
+ // type, can be converted to an rvalue of type "pointer to cv2 D", where D
+ // is a class derived from B, if a valid standard conversion from "pointer
+ // to D" to "pointer to B" exists, cv2 >= cv1, and B is not a virtual base
+ // class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context.
+
+ const PointerType *SrcPointer = SrcType->getAsPointerType();
+ if (!SrcPointer) {
+ return TSC_NotApplicable;
+ }
+
+ const PointerType *DestPointer = DestType->getAsPointerType();
+ if (!DestPointer) {
+ return TSC_NotApplicable;
+ }
+
+ return TryStaticDowncast(Self, SrcPointer->getPointeeType(),
+ DestPointer->getPointeeType(),
+ OpRange, SrcType, DestType);
+}
+
+/// TryStaticDowncast - Common functionality of TryStaticReferenceDowncast and
+/// TryStaticPointerDowncast. Tests whether a static downcast from SrcType to
+/// DestType, both of which must be canonical, is possible and allowed.
+TryStaticCastResult
+TryStaticDowncast(Sema &Self, QualType SrcType, QualType DestType,
+ const SourceRange &OpRange, QualType OrigSrcType,
+ QualType OrigDestType)
+{
+ // Downcast can only happen in class hierarchies, so we need classes.
+ if (!DestType->isRecordType() || !SrcType->isRecordType()) {
+ return TSC_NotApplicable;
+ }
+
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(DestType, SrcType, Paths)) {
+ return TSC_NotApplicable;
+ }
+
+ // Target type does derive from source type. Now we're serious. If an error
+ // appears now, it's not ignored.
+ // This may not be entirely in line with the standard. Take for example:
+ // struct A {};
+ // struct B : virtual A {
+ // B(A&);
+ // };
+ //
+ // void f()
+ // {
+ // (void)static_cast<const B&>(*((A*)0));
+ // }
+ // As far as the standard is concerned, p5 does not apply (A is virtual), so
+ // p2 should be used instead - "const B& t(*((A*)0));" is perfectly valid.
+ // However, both GCC and Comeau reject this example, and accepting it would
+ // mean more complex code if we're to preserve the nice error message.
+ // FIXME: Being 100% compliant here would be nice to have.
+
+ // Must preserve cv, as always.
+ if (!DestType.isAtLeastAsQualifiedAs(SrcType)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
+ << "static_cast" << OrigDestType << OrigSrcType << OpRange;
+ return TSC_Failed;
+ }
+
+ if (Paths.isAmbiguous(SrcType.getUnqualifiedType())) {
+    // This code is analogous to that in CheckDerivedToBaseConversion, except
+ // that it builds the paths in reverse order.
+ // To sum up: record all paths to the base and build a nice string from
+ // them. Use it to spice up the error message.
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ Self.IsDerivedFrom(DestType, SrcType, Paths);
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (BasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ for (BasePath::const_reverse_iterator Element = Path->rbegin();
+ Element != Path->rend(); ++Element)
+ PathDisplayStr += Element->Base->getType().getAsString() + " -> ";
+ PathDisplayStr += DestType.getAsString();
+ }
+ }
+
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_base_to_derived_cast)
+ << SrcType.getUnqualifiedType() << DestType.getUnqualifiedType()
+ << PathDisplayStr << OpRange;
+ return TSC_Failed;
+ }
+
+ if (Paths.getDetectedVirtual() != 0) {
+ QualType VirtualBase(Paths.getDetectedVirtual(), 0);
+ Self.Diag(OpRange.getBegin(), diag::err_static_downcast_via_virtual)
+ << OrigSrcType << OrigDestType << VirtualBase << OpRange;
+ return TSC_Failed;
+ }
+
+ // FIXME: Test accessibility.
+
+ return TSC_Success;
+}
+
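+// For illustration, the failure modes diagnosed above (hypothetical types):
+//
+//   struct A {};
+//   struct V : virtual A {};
+//   struct M : A {};
+//   struct D : A, M {};       // A is an ambiguous base of D
+//
+//   A *a = 0;
+//   static_cast<V*>(a);       // error: downcast through a virtual base
+//   static_cast<D*>(a);       // error: ambiguous conversion from A to D
+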
+/// TryStaticMemberPointerUpcast - Tests whether a conversion according to
+/// C++ 5.2.9p9 is valid:
+///
+/// An rvalue of type "pointer to member of D of type cv1 T" can be
+/// converted to an rvalue of type "pointer to member of B of type cv2 T",
+/// where B is a base class of D [...].
+///
+TryStaticCastResult
+TryStaticMemberPointerUpcast(Sema &Self, QualType SrcType, QualType DestType,
+ const SourceRange &OpRange)
+{
+ const MemberPointerType *SrcMemPtr = SrcType->getAsMemberPointerType();
+ if (!SrcMemPtr)
+ return TSC_NotApplicable;
+ const MemberPointerType *DestMemPtr = DestType->getAsMemberPointerType();
+ if (!DestMemPtr)
+ return TSC_NotApplicable;
+
+ // T == T, modulo cv
+ if (Self.Context.getCanonicalType(
+ SrcMemPtr->getPointeeType().getUnqualifiedType()) !=
+ Self.Context.getCanonicalType(DestMemPtr->getPointeeType().
+ getUnqualifiedType()))
+ return TSC_NotApplicable;
+
+ // B base of D
+ QualType SrcClass(SrcMemPtr->getClass(), 0);
+ QualType DestClass(DestMemPtr->getClass(), 0);
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
+ return TSC_NotApplicable;
+ }
+
+ // B is a base of D. But is it an allowed base? If not, it's a hard error.
+ if (Paths.isAmbiguous(DestClass)) {
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = Self.IsDerivedFrom(SrcClass, DestClass, Paths);
+ assert(StillOkay);
+    (void)StillOkay; // Silence unused-variable warnings in NDEBUG builds.
+ std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_memptr_conv)
+ << 1 << SrcClass << DestClass << PathDisplayStr << OpRange;
+ return TSC_Failed;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Self.Diag(OpRange.getBegin(), diag::err_memptr_conv_via_virtual)
+ << SrcClass << DestClass << QualType(VBase, 0) << OpRange;
+ return TSC_Failed;
+ }
+
+ // FIXME: Test accessibility.
+
+ return TSC_Success;
+}
+
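+// For illustration, the member pointer upcast handled above (hypothetical
+// types):
+//
+//   struct Base {};
+//   struct Derived : Base { int m; };
+//   int Base::*pb = static_cast<int Base::*>(&Derived::m);
+//       // accepted: pointer to member of Derived converted to pointer to
+//       // member of its unambiguous, non-virtual base Base
+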
+/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
+/// is valid:
+///
+/// An expression e can be explicitly converted to a type T using a
+/// @c static_cast if the declaration "T t(e);" is well-formed [...].
+TryStaticCastResult
+TryStaticImplicitCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ const SourceRange &OpRange)
+{
+ if (DestType->isReferenceType()) {
+ // At this point of CheckStaticCast, if the destination is a reference,
+ // this has to work. There is no other way that works.
+ return Self.CheckReferenceInit(SrcExpr, DestType) ?
+ TSC_Failed : TSC_Success;
+ }
+ if (DestType->isRecordType()) {
+ // FIXME: Use an implementation of C++ [over.match.ctor] for this.
+ return TSC_NotApplicable;
+ }
+
+ // FIXME: To get a proper error from invalid conversions here, we need to
+ // reimplement more of this.
+ ImplicitConversionSequence ICS = Self.TryImplicitConversion(
+ SrcExpr, DestType);
+ return ICS.ConversionKind == ImplicitConversionSequence::BadConversion ?
+ TSC_NotApplicable : TSC_Success;
+}
+
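+// For illustration, two conversions that reach the implicit-conversion path
+// above (hypothetical variables):
+//
+//   int i = 0;
+//   double d = static_cast<double>(i);   // "double t(i);" is well-formed, so
+//                                        // the cast succeeds as a standard
+//                                        // conversion
+//   const double &r = static_cast<const double&>(i);  // reference
+//                                        // destination: checked via
+//                                        // CheckReferenceInit
+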
+/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
+/// checked downcasts in class hierarchies.
+void
+CheckDynamicCast(Sema &Self, Expr *&SrcExpr, QualType DestType,
+ const SourceRange &OpRange,
+ const SourceRange &DestRange)
+{
+ QualType OrigDestType = DestType, OrigSrcType = SrcExpr->getType();
+ DestType = Self.Context.getCanonicalType(DestType);
+
+ // C++ 5.2.7p1: T shall be a pointer or reference to a complete class type,
+ // or "pointer to cv void".
+
+ QualType DestPointee;
+ const PointerType *DestPointer = DestType->getAsPointerType();
+ const ReferenceType *DestReference = DestType->getAsReferenceType();
+ if (DestPointer) {
+ DestPointee = DestPointer->getPointeeType();
+ } else if (DestReference) {
+ DestPointee = DestReference->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ref_or_ptr)
+ << OrigDestType << DestRange;
+ return;
+ }
+
+ const RecordType *DestRecord = DestPointee->getAsRecordType();
+ if (DestPointee->isVoidType()) {
+ assert(DestPointer && "Reference to void is not possible");
+ } else if (DestRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestPointee,
+ diag::err_bad_dynamic_cast_incomplete,
+ DestRange))
+ return;
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << DestPointee.getUnqualifiedType() << DestRange;
+ return;
+ }
+
+ // C++0x 5.2.7p2: If T is a pointer type, v shall be an rvalue of a pointer to
+ // complete class type, [...]. If T is an lvalue reference type, v shall be
+ // an lvalue of a complete class type, [...]. If T is an rvalue reference
+ // type, v shall be an expression having a complete effective class type,
+ // [...]
+
+ QualType SrcType = Self.Context.getCanonicalType(OrigSrcType);
+ QualType SrcPointee;
+ if (DestPointer) {
+ if (const PointerType *SrcPointer = SrcType->getAsPointerType()) {
+ SrcPointee = SrcPointer->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ptr)
+ << OrigSrcType << SrcExpr->getSourceRange();
+ return;
+ }
+ } else if (DestReference->isLValueReferenceType()) {
+ if (SrcExpr->isLvalue(Self.Context) != Expr::LV_Valid) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
+ << "dynamic_cast" << OrigDestType << OpRange;
+ }
+ SrcPointee = SrcType;
+ } else {
+ SrcPointee = SrcType;
+ }
+
+ const RecordType *SrcRecord = SrcPointee->getAsRecordType();
+ if (SrcRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
+ diag::err_bad_dynamic_cast_incomplete,
+ SrcExpr->getSourceRange()))
+ return;
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << SrcPointee.getUnqualifiedType() << SrcExpr->getSourceRange();
+ return;
+ }
+
+ assert((DestPointer || DestReference) &&
+ "Bad destination non-ptr/ref slipped through.");
+ assert((DestRecord || DestPointee->isVoidType()) &&
+ "Bad destination pointee slipped through.");
+ assert(SrcRecord && "Bad source pointee slipped through.");
+
+ // C++ 5.2.7p1: The dynamic_cast operator shall not cast away constness.
+ if (!DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_const_away)
+ << "dynamic_cast" << OrigDestType << OrigSrcType << OpRange;
+ return;
+ }
+
+ // C++ 5.2.7p3: If the type of v is the same as the required result type,
+ // [except for cv].
+ if (DestRecord == SrcRecord) {
+ return;
+ }
+
+ // C++ 5.2.7p5
+ // Upcasts are resolved statically.
+ if (DestRecord && Self.IsDerivedFrom(SrcPointee, DestPointee)) {
+ Self.CheckDerivedToBaseConversion(SrcPointee, DestPointee,
+ OpRange.getBegin(), OpRange);
+ // Diagnostic already emitted on error.
+ return;
+ }
+
+ // C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
+ const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition(Self.Context);
+ assert(SrcDecl && "Definition missing");
+ if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
+ << SrcPointee.getUnqualifiedType() << SrcExpr->getSourceRange();
+ }
+
+ // Done. Everything else is run-time checks.
+}
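+
+// For illustration, the main requirements enforced above (hypothetical types):
+//
+//   struct Plain {};                  // no virtual functions
+//   struct Poly { virtual ~Poly(); };
+//   struct DerivedPoly : Poly {};
+//
+//   Plain *pp = 0;
+//   Poly *qp = 0;
+//   dynamic_cast<DerivedPoly*>(pp);   // error: source class Plain is not
+//                                     //        polymorphic
+//   dynamic_cast<DerivedPoly*>(qp);   // OK: run-time checked downcast
+//   dynamic_cast<void*>(qp);          // OK: yields a pointer to the most
+//                                     //     derived object at run time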
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
new file mode 100644
index 0000000..98ee13a
--- /dev/null
+++ b/lib/Sema/SemaOverload.cpp
@@ -0,0 +1,4485 @@
+//===--- SemaOverload.cpp - C++ Overloading ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ overloading.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "SemaInherit.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+
+namespace clang {
+
+/// GetConversionCategory - Retrieve the implicit conversion
+/// category corresponding to the given implicit conversion kind.
+ImplicitConversionCategory
+GetConversionCategory(ImplicitConversionKind Kind) {
+ static const ImplicitConversionCategory
+ Category[(int)ICK_Num_Conversion_Kinds] = {
+ ICC_Identity,
+ ICC_Lvalue_Transformation,
+ ICC_Lvalue_Transformation,
+ ICC_Lvalue_Transformation,
+ ICC_Qualification_Adjustment,
+ ICC_Promotion,
+ ICC_Promotion,
+ ICC_Promotion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion
+ };
+ return Category[(int)Kind];
+}
+
+/// GetConversionRank - Retrieve the implicit conversion rank
+/// corresponding to the given implicit conversion kind.
+ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind) {
+ static const ImplicitConversionRank
+ Rank[(int)ICK_Num_Conversion_Kinds] = {
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion
+ };
+ return Rank[(int)Kind];
+}
+
+/// GetImplicitConversionName - Return the name of this kind of
+/// implicit conversion.
+const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
+ static const char* Name[(int)ICK_Num_Conversion_Kinds] = {
+ "No conversion",
+ "Lvalue-to-rvalue",
+ "Array-to-pointer",
+ "Function-to-pointer",
+ "Qualification",
+ "Integral promotion",
+ "Floating point promotion",
+ "Complex promotion",
+ "Integral conversion",
+ "Floating conversion",
+ "Complex conversion",
+ "Floating-integral conversion",
+ "Complex-real conversion",
+ "Pointer conversion",
+ "Pointer-to-member conversion",
+ "Boolean conversion",
+ "Compatible-types conversion",
+ "Derived-to-base conversion"
+ };
+ return Name[Kind];
+}
+
+/// StandardConversionSequence - Set the standard conversion
+/// sequence to the identity conversion.
+void StandardConversionSequence::setAsIdentityConversion() {
+ First = ICK_Identity;
+ Second = ICK_Identity;
+ Third = ICK_Identity;
+ Deprecated = false;
+ ReferenceBinding = false;
+ DirectBinding = false;
+ RRefBinding = false;
+ CopyConstructor = 0;
+}
+
+/// getRank - Retrieve the rank of this standard conversion sequence
+/// (C++ 13.3.3.1.1p3). The rank is the largest rank of each of the
+/// implicit conversions.
+ImplicitConversionRank StandardConversionSequence::getRank() const {
+ ImplicitConversionRank Rank = ICR_Exact_Match;
+ if (GetConversionRank(First) > Rank)
+ Rank = GetConversionRank(First);
+ if (GetConversionRank(Second) > Rank)
+ Rank = GetConversionRank(Second);
+ if (GetConversionRank(Third) > Rank)
+ Rank = GetConversionRank(Third);
+ return Rank;
+}
+
+/// isPointerConversionToBool - Determines whether this conversion is
+/// a conversion of a pointer or pointer-to-member to bool. This is
+/// used as part of the ranking of standard conversion sequences
+/// (C++ 13.3.3.2p4).
+bool StandardConversionSequence::isPointerConversionToBool() const
+{
+ QualType FromType = QualType::getFromOpaquePtr(FromTypePtr);
+ QualType ToType = QualType::getFromOpaquePtr(ToTypePtr);
+
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer or function-to-pointer implicit conversions, so
+ // check for their presence as well as checking whether FromType is
+ // a pointer.
+ if (ToType->isBooleanType() &&
+ (FromType->isPointerType() || FromType->isBlockPointerType() ||
+ First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
+ return true;
+
+ return false;
+}
+
+/// isPointerConversionToVoidPointer - Determines whether this
+/// conversion is a conversion of a pointer to a void pointer. This is
+/// used as part of the ranking of standard conversion sequences (C++
+/// 13.3.3.2p4).
+bool
+StandardConversionSequence::
+isPointerConversionToVoidPointer(ASTContext& Context) const
+{
+ QualType FromType = QualType::getFromOpaquePtr(FromTypePtr);
+ QualType ToType = QualType::getFromOpaquePtr(ToTypePtr);
+
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer implicit conversion, so check for its presence
+ // and redo the conversion to get a pointer.
+ if (First == ICK_Array_To_Pointer)
+ FromType = Context.getArrayDecayedType(FromType);
+
+ if (Second == ICK_Pointer_Conversion)
+ if (const PointerType* ToPtrType = ToType->getAsPointerType())
+ return ToPtrType->getPointeeType()->isVoidType();
+
+ return false;
+}
+
+/// DebugPrint - Print this standard conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void StandardConversionSequence::DebugPrint() const {
+ bool PrintedSomething = false;
+ if (First != ICK_Identity) {
+ fprintf(stderr, "%s", GetImplicitConversionName(First));
+ PrintedSomething = true;
+ }
+
+ if (Second != ICK_Identity) {
+ if (PrintedSomething) {
+ fprintf(stderr, " -> ");
+ }
+ fprintf(stderr, "%s", GetImplicitConversionName(Second));
+
+ if (CopyConstructor) {
+ fprintf(stderr, " (by copy constructor)");
+ } else if (DirectBinding) {
+ fprintf(stderr, " (direct reference binding)");
+ } else if (ReferenceBinding) {
+ fprintf(stderr, " (reference binding)");
+ }
+ PrintedSomething = true;
+ }
+
+ if (Third != ICK_Identity) {
+ if (PrintedSomething) {
+ fprintf(stderr, " -> ");
+ }
+ fprintf(stderr, "%s", GetImplicitConversionName(Third));
+ PrintedSomething = true;
+ }
+
+ if (!PrintedSomething) {
+ fprintf(stderr, "No conversions required");
+ }
+}
+
+/// DebugPrint - Print this user-defined conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void UserDefinedConversionSequence::DebugPrint() const {
+ if (Before.First || Before.Second || Before.Third) {
+ Before.DebugPrint();
+ fprintf(stderr, " -> ");
+ }
+ fprintf(stderr, "'%s'", ConversionFunction->getNameAsString().c_str());
+ if (After.First || After.Second || After.Third) {
+ fprintf(stderr, " -> ");
+ After.DebugPrint();
+ }
+}
+
+/// DebugPrint - Print this implicit conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void ImplicitConversionSequence::DebugPrint() const {
+ switch (ConversionKind) {
+ case StandardConversion:
+ fprintf(stderr, "Standard conversion: ");
+ Standard.DebugPrint();
+ break;
+ case UserDefinedConversion:
+ fprintf(stderr, "User-defined conversion: ");
+ UserDefined.DebugPrint();
+ break;
+ case EllipsisConversion:
+ fprintf(stderr, "Ellipsis conversion");
+ break;
+ case BadConversion:
+ fprintf(stderr, "Bad conversion");
+ break;
+ }
+
+ fprintf(stderr, "\n");
+}
+
+// IsOverload - Determine whether the given New declaration is an
+// overload of the Old declaration. This routine returns false if New
+// and Old cannot be overloaded, e.g., if they are functions with the
+// same signature (C++ 1.3.10) or if the Old declaration isn't a
+// function (or overload set). When it does return false and Old is an
+// OverloadedFunctionDecl, MatchedDecl will be set to point to the
+// FunctionDecl that New cannot be overloaded with.
+//
+// Example: Given the following input:
+//
+// void f(int, float); // #1
+// void f(int, int); // #2
+// int f(int, int); // #3
+//
+// When we process #1, there is no previous declaration of "f",
+// so IsOverload will not be used.
+//
+// When we process #2, Old is a FunctionDecl for #1. By comparing the
+// parameter types, we see that #1 and #2 are overloaded (since they
+// have different signatures), so this routine returns false;
+// MatchedDecl is unchanged.
+//
+// When we process #3, Old is an OverloadedFunctionDecl containing #1
+// and #2. We compare the signatures of #3 to #1 (they're overloaded,
+// so we do nothing) and then #3 to #2. Since the signatures of #3 and
+// #2 are identical (return types of functions are not part of the
+// signature), IsOverload returns false and MatchedDecl will be set to
+// point to the FunctionDecl for #2.
+bool
+Sema::IsOverload(FunctionDecl *New, Decl* OldD,
+ OverloadedFunctionDecl::function_iterator& MatchedDecl)
+{
+ if (OverloadedFunctionDecl* Ovl = dyn_cast<OverloadedFunctionDecl>(OldD)) {
+ // Is this new function an overload of every function in the
+ // overload set?
+ OverloadedFunctionDecl::function_iterator Func = Ovl->function_begin(),
+ FuncEnd = Ovl->function_end();
+ for (; Func != FuncEnd; ++Func) {
+ if (!IsOverload(New, *Func, MatchedDecl)) {
+ MatchedDecl = Func;
+ return false;
+ }
+ }
+
+ // This function overloads every function in the overload set.
+ return true;
+ } else if (FunctionDecl* Old = dyn_cast<FunctionDecl>(OldD)) {
+ // Is the function New an overload of the function Old?
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+
+ // Compare the signatures (C++ 1.3.10) of the two functions to
+ // determine whether they are overloads. If we find any mismatch
+ // in the signature, they are overloads.
+
+ // If either of these functions is a K&R-style function (no
+ // prototype), then we consider them to have matching signatures.
+ if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
+ isa<FunctionNoProtoType>(NewQType.getTypePtr()))
+ return false;
+
+ FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType.getTypePtr());
+ FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType.getTypePtr());
+
+ // The signature of a function includes the types of its
+ // parameters (C++ 1.3.10), which includes the presence or absence
+ // of the ellipsis; see C++ DR 357).
+ if (OldQType != NewQType &&
+ (OldType->getNumArgs() != NewType->getNumArgs() ||
+ OldType->isVariadic() != NewType->isVariadic() ||
+ !std::equal(OldType->arg_type_begin(), OldType->arg_type_end(),
+ NewType->arg_type_begin())))
+ return true;
+
+ // If the function is a class member, its signature includes the
+ // cv-qualifiers (if any) on the function itself.
+ //
+ // As part of this, also check whether one of the member functions
+ // is static, in which case they are not overloads (C++
+ // 13.1p2). While not part of the definition of the signature,
+ // this check is important to determine whether these functions
+ // can be overloaded.
+ CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod &&
+ !OldMethod->isStatic() && !NewMethod->isStatic() &&
+ OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers())
+ return true;
+
+ // The signatures match; this is not an overload.
+ return false;
+ } else {
+ // (C++ 13p1):
+ // Only function declarations can be overloaded; object and type
+ // declarations cannot be overloaded.
+ return false;
+ }
+}
+
+/// TryImplicitConversion - Attempt to perform an implicit conversion
+/// from the given expression (Expr) to the given type (ToType). This
+/// function returns an implicit conversion sequence that can be used
+/// to perform the initialization. Given
+///
+/// void f(float f);
+/// void g(int i) { f(i); }
+///
+/// this routine would produce an implicit conversion sequence to
+/// describe the initialization of f from i, which will be a standard
+/// conversion sequence containing an lvalue-to-rvalue conversion (C++
+/// 4.1) followed by a floating-integral conversion (C++ 4.9).
+///
+/// Note that this routine only determines how the conversion can be
+/// performed; it does not actually perform the conversion. As such,
+/// it will not produce any diagnostics if no conversion is available,
+/// but will instead return an implicit conversion sequence of kind
+/// "BadConversion".
+///
+/// If @p SuppressUserConversions, then user-defined conversions are
+/// not permitted.
+/// If @p AllowExplicit, then explicit user-defined conversions are
+/// permitted.
+/// If @p ForceRValue, then overloading is performed as if From were an rvalue,
+/// no matter its actual lvalueness.
+ImplicitConversionSequence
+Sema::TryImplicitConversion(Expr* From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit, bool ForceRValue)
+{
+ ImplicitConversionSequence ICS;
+ if (IsStandardConversion(From, ToType, ICS.Standard))
+ ICS.ConversionKind = ImplicitConversionSequence::StandardConversion;
+ else if (getLangOptions().CPlusPlus &&
+ IsUserDefinedConversion(From, ToType, ICS.UserDefined,
+ !SuppressUserConversions, AllowExplicit,
+ ForceRValue)) {
+ ICS.ConversionKind = ImplicitConversionSequence::UserDefinedConversion;
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
+ QualType FromCanon
+ = Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (FromCanon == ToCanon || IsDerivedFrom(FromCanon, ToCanon)) {
+ // Turn this into a "standard" conversion sequence, so that it
+ // gets ranked with standard conversion sequences.
+ ICS.ConversionKind = ImplicitConversionSequence::StandardConversion;
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.FromTypePtr = From->getType().getAsOpaquePtr();
+ ICS.Standard.ToTypePtr = ToType.getAsOpaquePtr();
+ ICS.Standard.CopyConstructor = Constructor;
+ if (ToCanon != FromCanon)
+ ICS.Standard.Second = ICK_Derived_To_Base;
+ }
+ }
+
+ // C++ [over.best.ics]p4:
+ // However, when considering the argument of a user-defined
+ // conversion function that is a candidate by 13.3.1.3 when
+ // invoked for the copying of the temporary in the second step
+ // of a class copy-initialization, or by 13.3.1.4, 13.3.1.5, or
+ // 13.3.1.6 in all cases, only standard conversion sequences and
+ // ellipsis conversion sequences are allowed.
+ if (SuppressUserConversions &&
+ ICS.ConversionKind == ImplicitConversionSequence::UserDefinedConversion)
+ ICS.ConversionKind = ImplicitConversionSequence::BadConversion;
+ } else
+ ICS.ConversionKind = ImplicitConversionSequence::BadConversion;
+
+ return ICS;
+}
+
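+// For illustration, the [over.ics.user]p4 special case handled above
+// (hypothetical classes):
+//
+//   struct Base {};
+//   struct Derived : Base {};
+//
+//   void f(Base);
+//   void g(Derived d) { f(d); }  // the Base parameter is copy-constructed
+//                                // from d; although a constructor (a
+//                                // user-defined conversion) is called, the
+//                                // sequence is ranked as a standard
+//                                // Derived-to-Base conversion
+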
+/// IsStandardConversion - Determines whether there is a standard
+/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
+/// expression From to the type ToType. Standard conversion sequences
+/// only consider non-class types; for conversions that involve class
+/// types, use TryImplicitConversion. If a conversion exists, SCS will
+/// contain the standard conversion sequence required to perform this
+/// conversion and this routine will return true. Otherwise, this
+/// routine will return false and the value of SCS is unspecified.
+bool
+Sema::IsStandardConversion(Expr* From, QualType ToType,
+ StandardConversionSequence &SCS)
+{
+ QualType FromType = From->getType();
+
+ // Standard conversions (C++ [conv])
+ SCS.setAsIdentityConversion();
+ SCS.Deprecated = false;
+ SCS.IncompatibleObjC = false;
+ SCS.FromTypePtr = FromType.getAsOpaquePtr();
+ SCS.CopyConstructor = 0;
+
+ // There are no standard conversions for class types in C++, so
+ // abort early. When overloading in C, however, we do permit
+ if (FromType->isRecordType() || ToType->isRecordType()) {
+ if (getLangOptions().CPlusPlus)
+ return false;
+
+ // When we're overloading in C, we allow, as standard conversions,
+ }
+
+ // The first conversion can be an lvalue-to-rvalue conversion,
+ // array-to-pointer conversion, or function-to-pointer conversion
+ // (C++ 4p1).
+
+ // Lvalue-to-rvalue conversion (C++ 4.1):
+ // An lvalue (3.10) of a non-function, non-array type T can be
+ // converted to an rvalue.
+ Expr::isLvalueResult argIsLvalue = From->isLvalue(Context);
+ if (argIsLvalue == Expr::LV_Valid &&
+ !FromType->isFunctionType() && !FromType->isArrayType() &&
+ Context.getCanonicalType(FromType) != Context.OverloadTy) {
+ SCS.First = ICK_Lvalue_To_Rvalue;
+
+ // If T is a non-class type, the type of the rvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the rvalue
+ // is T (C++ 4.1p1). C++ can't get here with class types; in C, we
+ // just strip the qualifiers because they don't matter.
+
+ // FIXME: Doesn't see through to qualifiers behind a typedef!
+ FromType = FromType.getUnqualifiedType();
+ }
+ // Array-to-pointer conversion (C++ 4.2)
+ else if (FromType->isArrayType()) {
+ SCS.First = ICK_Array_To_Pointer;
+
+ // An lvalue or rvalue of type "array of N T" or "array of unknown
+ // bound of T" can be converted to an rvalue of type "pointer to
+ // T" (C++ 4.2p1).
+ FromType = Context.getArrayDecayedType(FromType);
+
+ if (IsStringLiteralToNonConstPointerConversion(From, ToType)) {
+ // This conversion is deprecated. (C++ D.4).
+ SCS.Deprecated = true;
+
+ // For the purpose of ranking in overload resolution
+ // (13.3.3.1.1), this conversion is considered an
+ // array-to-pointer conversion followed by a qualification
+ // conversion (4.4). (C++ 4.2p2)
+ SCS.Second = ICK_Identity;
+ SCS.Third = ICK_Qualification;
+ SCS.ToTypePtr = ToType.getAsOpaquePtr();
+ return true;
+ }
+ }
+ // Function-to-pointer conversion (C++ 4.3).
+ else if (FromType->isFunctionType() && argIsLvalue == Expr::LV_Valid) {
+ SCS.First = ICK_Function_To_Pointer;
+
+ // An lvalue of function type T can be converted to an rvalue of
+ // type "pointer to T." The result is a pointer to the
+ // function. (C++ 4.3p1).
+ FromType = Context.getPointerType(FromType);
+ }
+ // Address of overloaded function (C++ [over.over]).
+ else if (FunctionDecl *Fn
+ = ResolveAddressOfOverloadedFunction(From, ToType, false)) {
+ SCS.First = ICK_Function_To_Pointer;
+
+ // We were able to resolve the address of the overloaded function,
+ // so we can convert to the type of that function.
+ FromType = Fn->getType();
+ if (ToType->isLValueReferenceType())
+ FromType = Context.getLValueReferenceType(FromType);
+ else if (ToType->isRValueReferenceType())
+ FromType = Context.getRValueReferenceType(FromType);
+ else if (ToType->isMemberPointerType()) {
+ // Resolve address only succeeds if both sides are member pointers,
+ // but it doesn't have to be the same class. See DR 247.
+ // Note that this means that the type of &Derived::fn can be
+ // Ret (Base::*)(Args) if the fn overload actually found is from the
+ // base class, even if it was brought into the derived class via a
+ // using declaration. The standard isn't clear on this issue at all.
+ CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
+ FromType = Context.getMemberPointerType(FromType,
+ Context.getTypeDeclType(M->getParent()).getTypePtr());
+ } else
+ FromType = Context.getPointerType(FromType);
+ }
+ // We don't require any conversions for the first step.
+ else {
+ SCS.First = ICK_Identity;
+ }
+
+ // The second conversion can be an integral promotion, floating
+ // point promotion, integral conversion, floating point conversion,
+ // floating-integral conversion, pointer conversion,
+ // pointer-to-member conversion, or boolean conversion (C++ 4p1).
+ // For overloading in C, this can also be a "compatible-type"
+ // conversion.
+ bool IncompatibleObjC = false;
+ if (Context.hasSameUnqualifiedType(FromType, ToType)) {
+ // The unqualified versions of the types are the same: there's no
+ // conversion to do.
+ SCS.Second = ICK_Identity;
+ }
+ // Integral promotion (C++ 4.5).
+ else if (IsIntegralPromotion(From, FromType, ToType)) {
+ SCS.Second = ICK_Integral_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Floating point promotion (C++ 4.6).
+ else if (IsFloatingPointPromotion(FromType, ToType)) {
+ SCS.Second = ICK_Floating_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Complex promotion (Clang extension)
+ else if (IsComplexPromotion(FromType, ToType)) {
+ SCS.Second = ICK_Complex_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Integral conversions (C++ 4.7).
+ // FIXME: isIntegralType shouldn't be true for enums in C++.
+ else if ((FromType->isIntegralType() || FromType->isEnumeralType()) &&
+ (ToType->isIntegralType() && !ToType->isEnumeralType())) {
+ SCS.Second = ICK_Integral_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Floating point conversions (C++ 4.8).
+ else if (FromType->isFloatingType() && ToType->isFloatingType()) {
+ SCS.Second = ICK_Floating_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Complex conversions (C99 6.3.1.6)
+ else if (FromType->isComplexType() && ToType->isComplexType()) {
+ SCS.Second = ICK_Complex_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Floating-integral conversions (C++ 4.9).
+ // FIXME: isIntegralType shouldn't be true for enums in C++.
+ else if ((FromType->isFloatingType() &&
+ ToType->isIntegralType() && !ToType->isBooleanType() &&
+ !ToType->isEnumeralType()) ||
+ ((FromType->isIntegralType() || FromType->isEnumeralType()) &&
+ ToType->isFloatingType())) {
+ SCS.Second = ICK_Floating_Integral;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Complex-real conversions (C99 6.3.1.7)
+ else if ((FromType->isComplexType() && ToType->isArithmeticType()) ||
+ (ToType->isComplexType() && FromType->isArithmeticType())) {
+ SCS.Second = ICK_Complex_Real;
+ FromType = ToType.getUnqualifiedType();
+ }
+ // Pointer conversions (C++ 4.10).
+ else if (IsPointerConversion(From, FromType, ToType, FromType,
+ IncompatibleObjC)) {
+ SCS.Second = ICK_Pointer_Conversion;
+ SCS.IncompatibleObjC = IncompatibleObjC;
+ }
+ // Pointer to member conversions (4.11).
+ else if (IsMemberPointerConversion(From, FromType, ToType, FromType)) {
+ SCS.Second = ICK_Pointer_Member;
+ }
+ // Boolean conversions (C++ 4.12).
+ else if (ToType->isBooleanType() &&
+ (FromType->isArithmeticType() ||
+ FromType->isEnumeralType() ||
+ FromType->isPointerType() ||
+ FromType->isBlockPointerType() ||
+ FromType->isMemberPointerType() ||
+ FromType->isNullPtrType())) {
+ SCS.Second = ICK_Boolean_Conversion;
+ FromType = Context.BoolTy;
+ }
+ // Compatible conversions (Clang extension for C function overloading)
+ else if (!getLangOptions().CPlusPlus &&
+ Context.typesAreCompatible(ToType, FromType)) {
+ SCS.Second = ICK_Compatible_Conversion;
+ } else {
+ // No second conversion required.
+ SCS.Second = ICK_Identity;
+ }
+
+ QualType CanonFrom;
+ QualType CanonTo;
+ // The third conversion can be a qualification conversion (C++ 4p1).
+ if (IsQualificationConversion(FromType, ToType)) {
+ SCS.Third = ICK_Qualification;
+ FromType = ToType;
+ CanonFrom = Context.getCanonicalType(FromType);
+ CanonTo = Context.getCanonicalType(ToType);
+ } else {
+ // No conversion required
+ SCS.Third = ICK_Identity;
+
+ // C++ [over.best.ics]p6:
+ // [...] Any difference in top-level cv-qualification is
+ // subsumed by the initialization itself and does not constitute
+ // a conversion. [...]
+ CanonFrom = Context.getCanonicalType(FromType);
+ CanonTo = Context.getCanonicalType(ToType);
+ if (CanonFrom.getUnqualifiedType() == CanonTo.getUnqualifiedType() &&
+ CanonFrom.getCVRQualifiers() != CanonTo.getCVRQualifiers()) {
+ FromType = ToType;
+ CanonFrom = CanonTo;
+ }
+ }
+
+ // If we have not converted the argument type to the parameter type,
+ // this is a bad conversion sequence.
+ if (CanonFrom != CanonTo)
+ return false;
+
+ SCS.ToTypePtr = FromType.getAsOpaquePtr();
+ return true;
+}
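+
+// A sketch of the source-level conversions the three steps above classify,
+// assuming the usual C++ conversion rules:
+//
+//   int a[4];
+//   const int *p = a;   // First: array-to-pointer; Third: qualification
+//   long l = 'x';       // Second: integral conversion from char to long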
+
+/// IsIntegralPromotion - Determines whether the conversion from the
+/// expression From (whose potentially-adjusted type is FromType) to
+/// ToType is an integral promotion (C++ 4.5), returning true if so.
+bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType)
+{
+ const BuiltinType *To = ToType->getAsBuiltinType();
+ // All integers are built-in.
+ if (!To) {
+ return false;
+ }
+
+ // An rvalue of type char, signed char, unsigned char, short int, or
+ // unsigned short int can be converted to an rvalue of type int if
+ // int can represent all the values of the source type; otherwise,
+ // the source rvalue can be converted to an rvalue of type unsigned
+ // int (C++ 4.5p1).
+ if (FromType->isPromotableIntegerType() && !FromType->isBooleanType()) {
+ if (// We can promote any signed, promotable integer type to an int
+ (FromType->isSignedIntegerType() ||
+ // We can promote any unsigned integer type whose size is
+ // less than int to an int.
+ (!FromType->isSignedIntegerType() &&
+ Context.getTypeSize(FromType) < Context.getTypeSize(ToType)))) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ // An rvalue of type wchar_t (3.9.1) or an enumeration type (7.2)
+ // can be converted to an rvalue of the first of the following types
+ // that can represent all the values of its underlying type: int,
+ // unsigned int, long, or unsigned long (C++ 4.5p2).
+ if ((FromType->isEnumeralType() || FromType->isWideCharType())
+ && ToType->isIntegerType()) {
+ // Determine whether the type we're converting from is signed or
+ // unsigned.
+ bool FromIsSigned;
+ uint64_t FromSize = Context.getTypeSize(FromType);
+ if (const EnumType *FromEnumType = FromType->getAsEnumType()) {
+ QualType UnderlyingType = FromEnumType->getDecl()->getIntegerType();
+ FromIsSigned = UnderlyingType->isSignedIntegerType();
+ } else {
+ // FIXME: Is wchar_t signed or unsigned? We assume it's signed for now.
+ FromIsSigned = true;
+ }
+
+ // The types we'll try to promote to, in the appropriate
+ // order. Try each of these types.
+ QualType PromoteTypes[6] = {
+ Context.IntTy, Context.UnsignedIntTy,
+ Context.LongTy, Context.UnsignedLongTy ,
+ Context.LongLongTy, Context.UnsignedLongLongTy
+ };
+ for (int Idx = 0; Idx < 6; ++Idx) {
+ uint64_t ToSize = Context.getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) {
+ // We found the type that we can promote to. If this is the
+ // type we wanted, we have a promotion. Otherwise, no
+ // promotion.
+ return Context.getCanonicalType(ToType).getUnqualifiedType()
+ == Context.getCanonicalType(PromoteTypes[Idx]).getUnqualifiedType();
+ }
+ }
+ }
+
+ // An rvalue for an integral bit-field (9.6) can be converted to an
+ // rvalue of type int if int can represent all the values of the
+ // bit-field; otherwise, it can be converted to unsigned int if
+ // unsigned int can represent all the values of the bit-field. If
+ // the bit-field is larger yet, no integral promotion applies to
+ // it. If the bit-field has an enumerated type, it is treated as any
+ // other value of that type for promotion purposes (C++ 4.5p3).
+ // FIXME: We should delay checking of bit-fields until we actually perform the
+ // conversion.
+ using llvm::APSInt;
+ if (From)
+ if (FieldDecl *MemberDecl = From->getBitField()) {
+ APSInt BitWidth;
+ if (FromType->isIntegralType() && !FromType->isEnumeralType() &&
+ MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
+ APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
+ ToSize = Context.getTypeSize(ToType);
+
+ // Are we promoting to an int from a bitfield that fits in an int?
+ if (BitWidth < ToSize ||
+ (FromType->isSignedIntegerType() && BitWidth <= ToSize)) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ // Are we promoting to an unsigned int from an unsigned bitfield
+ // that fits into an unsigned int?
+ if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) {
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ return false;
+ }
+ }
+
+ // An rvalue of type bool can be converted to an rvalue of type int,
+ // with false becoming zero and true becoming one (C++ 4.5p4).
+ if (FromType->isBooleanType() && To->getKind() == BuiltinType::Int) {
+ return true;
+ }
+
+ return false;
+}
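+
+// For example, the integral promotions checked above include (a sketch):
+//
+//   short s = 1;
+//   int   i = s;      // any promotable signed integer type promotes to int
+//   enum E { e };
+//   int   j = e;      // enum promotes to the first type that fits (C++ 4.5p2)
+//   bool  b = true;
+//   int   k = b;      // bool -> int, false -> 0 and true -> 1 (C++ 4.5p4)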
+
+/// IsFloatingPointPromotion - Determines whether the conversion from
+/// FromType to ToType is a floating point promotion (C++ 4.6),
+/// returning true if so.
+bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType)
+{
+  // An rvalue of type float can be converted to an rvalue of type
+  // double. (C++ 4.6p1).
+ if (const BuiltinType *FromBuiltin = FromType->getAsBuiltinType())
+ if (const BuiltinType *ToBuiltin = ToType->getAsBuiltinType()) {
+ if (FromBuiltin->getKind() == BuiltinType::Float &&
+ ToBuiltin->getKind() == BuiltinType::Double)
+ return true;
+
+ // C99 6.3.1.5p1:
+ // When a float is promoted to double or long double, or a
+ // double is promoted to long double [...].
+ if (!getLangOptions().CPlusPlus &&
+ (FromBuiltin->getKind() == BuiltinType::Float ||
+ FromBuiltin->getKind() == BuiltinType::Double) &&
+ (ToBuiltin->getKind() == BuiltinType::LongDouble))
+ return true;
+ }
+
+ return false;
+}
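+
+// For example (a sketch): in C++ only float -> double qualifies; for
+// overloading in C, float or double -> long double is also accepted
+// (C99 6.3.1.5p1):
+//
+//   float  f = 1.0f;
+//   double d = f;     // floating point promotion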
+
+/// \brief Determine if a conversion is a complex promotion.
+///
+/// A complex promotion is defined as a complex -> complex conversion
+/// where the conversion between the underlying real types is a
+/// floating-point or integral promotion.
+bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
+ const ComplexType *FromComplex = FromType->getAsComplexType();
+ if (!FromComplex)
+ return false;
+
+ const ComplexType *ToComplex = ToType->getAsComplexType();
+ if (!ToComplex)
+ return false;
+
+ return IsFloatingPointPromotion(FromComplex->getElementType(),
+ ToComplex->getElementType()) ||
+ IsIntegralPromotion(0, FromComplex->getElementType(),
+ ToComplex->getElementType());
+}
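+
+// For example (a sketch, using the _Complex extension):
+//
+//   _Complex float  cf = 1.0f;
+//   _Complex double cd = cf;   // element promotion float -> double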
+
+/// BuildSimilarlyQualifiedPointerType - Builds the type of a pointer to
+/// ToPointee that carries the same cv-qualifiers that FromPtr has on its
+/// pointee type. ToType, if non-null, is the destination pointer type,
+/// which may or may not already have the right set of qualifiers on its
+/// pointee.
+static QualType
+BuildSimilarlyQualifiedPointerType(const PointerType *FromPtr,
+ QualType ToPointee, QualType ToType,
+ ASTContext &Context) {
+ QualType CanonFromPointee = Context.getCanonicalType(FromPtr->getPointeeType());
+ QualType CanonToPointee = Context.getCanonicalType(ToPointee);
+ unsigned Quals = CanonFromPointee.getCVRQualifiers();
+
+ // Exact qualifier match -> return the pointer type we're converting to.
+ if (CanonToPointee.getCVRQualifiers() == Quals) {
+ // ToType is exactly what we need. Return it.
+ if (ToType.getTypePtr())
+ return ToType;
+
+ // Build a pointer to ToPointee. It has the right qualifiers
+ // already.
+ return Context.getPointerType(ToPointee);
+ }
+
+ // Just build a canonical type that has the right qualifiers.
+ return Context.getPointerType(CanonToPointee.getQualifiedType(Quals));
+}
+
+/// IsPointerConversion - Determines whether the conversion of the
+/// expression From, which has the (possibly adjusted) type FromType,
+/// can be converted to the type ToType via a pointer conversion (C++
+/// 4.10). If so, returns true and places the converted type (that
+/// might differ from ToType in its cv-qualifiers at some level) into
+/// ConvertedType.
+///
+/// This routine also supports conversions to and from block pointers
+/// and conversions with Objective-C's 'id', 'id<protocols...>', and
+/// pointers to interfaces. FIXME: Once we've determined the
+/// appropriate overloading rules for Objective-C, we may want to
+/// split the Objective-C checks into a different routine; however,
+/// GCC seems to consider all of these conversions to be pointer
+/// conversions, so for now they live here. IncompatibleObjC will be
+/// set if the conversion is an allowed Objective-C conversion that
+/// should result in a warning.
+bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC)
+{
+ IncompatibleObjC = false;
+ if (isObjCPointerConversion(FromType, ToType, ConvertedType, IncompatibleObjC))
+ return true;
+
+ // Conversion from a null pointer constant to any Objective-C pointer type.
+ if (Context.isObjCObjectPointerType(ToType) &&
+ From->isNullPointerConstant(Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Blocks: Block pointers can be converted to void*.
+ if (FromType->isBlockPointerType() && ToType->isPointerType() &&
+ ToType->getAsPointerType()->getPointeeType()->isVoidType()) {
+ ConvertedType = ToType;
+ return true;
+ }
+ // Blocks: A null pointer constant can be converted to a block
+ // pointer type.
+ if (ToType->isBlockPointerType() && From->isNullPointerConstant(Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // If the left-hand-side is nullptr_t, the right side can be a null
+ // pointer constant.
+ if (ToType->isNullPtrType() && From->isNullPointerConstant(Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ const PointerType* ToTypePtr = ToType->getAsPointerType();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a pointer type (C++ 4.10p1).
+ if (From->isNullPointerConstant(Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Beyond this point, both types need to be pointers.
+ const PointerType *FromTypePtr = FromType->getAsPointerType();
+ if (!FromTypePtr)
+ return false;
+
+ QualType FromPointeeType = FromTypePtr->getPointeeType();
+ QualType ToPointeeType = ToTypePtr->getPointeeType();
+
+ // An rvalue of type "pointer to cv T," where T is an object type,
+ // can be converted to an rvalue of type "pointer to cv void" (C++
+ // 4.10p2).
+ if (FromPointeeType->isObjectType() && ToPointeeType->isVoidType()) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // When we're overloading in C, we allow a special kind of pointer
+ // conversion for compatible-but-not-identical pointee types.
+ if (!getLangOptions().CPlusPlus &&
+ Context.typesAreCompatible(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // C++ [conv.ptr]p3:
+ //
+ // An rvalue of type "pointer to cv D," where D is a class type,
+ // can be converted to an rvalue of type "pointer to cv B," where
+ // B is a base class (clause 10) of D. If B is an inaccessible
+ // (clause 11) or ambiguous (10.2) base class of D, a program that
+ // necessitates this conversion is ill-formed. The result of the
+ // conversion is a pointer to the base class sub-object of the
+ // derived class object. The null pointer value is converted to
+ // the null pointer value of the destination type.
+ //
+ // Note that we do not check for ambiguity or inaccessibility
+ // here. That is handled by CheckPointerConversion.
+ if (getLangOptions().CPlusPlus &&
+ FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ IsDerivedFrom(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ return false;
+}
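+
+// For example, the pointer conversions recognized above include (a sketch):
+//
+//   struct Base {}; struct Derived : Base {};
+//   Derived *d = 0;   // null pointer constant -> pointer (C++ 4.10p1)
+//   void *v = d;      // pointer to object -> pointer to void (C++ 4.10p2)
+//   Base *b = d;      // derived-to-base pointer conversion (C++ 4.10p3)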
+
+/// isObjCPointerConversion - Determines whether this is an
+/// Objective-C pointer conversion. Subroutine of IsPointerConversion,
+/// with the same arguments and return values.
+bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC) {
+ if (!getLangOptions().ObjC1)
+ return false;
+
+ // Conversions with Objective-C's id<...>.
+ if ((FromType->isObjCQualifiedIdType() || ToType->isObjCQualifiedIdType()) &&
+ ObjCQualifiedIdTypesAreCompatible(ToType, FromType, /*compare=*/false)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Beyond this point, both types need to be pointers or block pointers.
+ QualType ToPointeeType;
+ const PointerType* ToTypePtr = ToType->getAsPointerType();
+ if (ToTypePtr)
+ ToPointeeType = ToTypePtr->getPointeeType();
+ else if (const BlockPointerType *ToBlockPtr = ToType->getAsBlockPointerType())
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ else
+ return false;
+
+ QualType FromPointeeType;
+ const PointerType *FromTypePtr = FromType->getAsPointerType();
+ if (FromTypePtr)
+ FromPointeeType = FromTypePtr->getPointeeType();
+ else if (const BlockPointerType *FromBlockPtr
+ = FromType->getAsBlockPointerType())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+
+  // Objective-C++: We're able to convert from a pointer to an
+ // interface to a pointer to a different interface.
+ const ObjCInterfaceType* FromIface = FromPointeeType->getAsObjCInterfaceType();
+ const ObjCInterfaceType* ToIface = ToPointeeType->getAsObjCInterfaceType();
+ if (FromIface && ToIface &&
+ Context.canAssignObjCInterfaces(ToIface, FromIface)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ if (FromIface && ToIface &&
+ Context.canAssignObjCInterfaces(FromIface, ToIface)) {
+ // Okay: this is some kind of implicit downcast of Objective-C
+ // interfaces, which is permitted. However, we're going to
+ // complain about it.
+ IncompatibleObjC = true;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+  // Objective-C++: We're able to convert between "id" and a pointer
+ // to any interface (in both directions).
+ if ((FromIface && Context.isObjCIdStructType(ToPointeeType))
+ || (ToIface && Context.isObjCIdStructType(FromPointeeType))) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+  // Objective-C++: Allow conversions between the Objective-C "id" and
+ // "Class", in either direction.
+ if ((Context.isObjCIdStructType(FromPointeeType) &&
+ Context.isObjCClassStructType(ToPointeeType)) ||
+ (Context.isObjCClassStructType(FromPointeeType) &&
+ Context.isObjCIdStructType(ToPointeeType))) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // If we have pointers to pointers, recursively check whether this
+ // is an Objective-C conversion.
+ if (FromPointeeType->isPointerType() && ToPointeeType->isPointerType() &&
+ isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
+ IncompatibleObjC)) {
+ // We always complain about this conversion.
+ IncompatibleObjC = true;
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // If we have pointers to functions or blocks, check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion (but
+ // complain about it).
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAsFunctionProtoType();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAsFunctionProtoType();
+ if (FromFunctionType && ToFunctionType) {
+ // If the function types are exactly the same, this isn't an
+ // Objective-C pointer conversion.
+ if (Context.getCanonicalType(FromPointeeType)
+ == Context.getCanonicalType(ToPointeeType))
+ return false;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
+ FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
+ return false;
+
+ bool HasObjCConversion = false;
+ if (Context.getCanonicalType(FromFunctionType->getResultType())
+ == Context.getCanonicalType(ToFunctionType->getResultType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromFunctionType->getResultType(),
+ ToFunctionType->getResultType(),
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Function types are too different. Abort.
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+ if (Context.getCanonicalType(FromArgType)
+ == Context.getCanonicalType(ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromArgType, ToArgType,
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Argument types are too different. Abort.
+ return false;
+ }
+ }
+
+ if (HasObjCConversion) {
+ // We had an Objective-C conversion. Allow this pointer
+ // conversion, but complain about it.
+ ConvertedType = ToType;
+ IncompatibleObjC = true;
+ return true;
+ }
+ }
+
+ return false;
+}
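+
+// For example (a sketch, relying on NSString being derived from NSObject):
+//
+//   NSString *s = 0;
+//   NSObject *o = s;   // interface upcast: accepted silently
+//   NSString *t = o;   // interface downcast: accepted, but IncompatibleObjC
+//   id      any = s;   // any interface pointer converts to 'id'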
+
+/// CheckPointerConversion - Check the pointer conversion from the
+/// expression From to the type ToType. This routine checks for
+/// ambiguous (FIXME: or inaccessible) derived-to-base pointer
+/// conversions for which IsPointerConversion has already returned
+/// true. It returns true and produces a diagnostic if there was an
+/// error, or returns false otherwise.
+bool Sema::CheckPointerConversion(Expr *From, QualType ToType) {
+ QualType FromType = From->getType();
+
+ if (const PointerType *FromPtrType = FromType->getAsPointerType())
+ if (const PointerType *ToPtrType = ToType->getAsPointerType()) {
+ QualType FromPointeeType = FromPtrType->getPointeeType(),
+ ToPointeeType = ToPtrType->getPointeeType();
+
+ // Objective-C++ conversions are always okay.
+ // FIXME: We should have a different class of conversions for the
+ // Objective-C++ implicit conversions.
+ if (Context.isObjCIdStructType(FromPointeeType) ||
+ Context.isObjCIdStructType(ToPointeeType) ||
+ Context.isObjCClassStructType(FromPointeeType) ||
+ Context.isObjCClassStructType(ToPointeeType))
+ return false;
+
+ if (FromPointeeType->isRecordType() &&
+ ToPointeeType->isRecordType()) {
+ // We must have a derived-to-base conversion. Check an
+ // ambiguous or inaccessible conversion.
+ return CheckDerivedToBaseConversion(FromPointeeType, ToPointeeType,
+ From->getExprLoc(),
+ From->getSourceRange());
+ }
+ }
+
+ return false;
+}
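+
+// For example (a sketch): given 'struct A {}; struct B : A {}; struct C : A {};
+// struct D : B, C {};', converting 'D*' to 'A*' is ambiguous, and the
+// derived-to-base check above is what diagnoses it.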
+
+/// IsMemberPointerConversion - Determines whether the conversion of the
+/// expression From, which has the (possibly adjusted) type FromType, can be
+/// converted to the type ToType via a member pointer conversion (C++ 4.11).
+/// If so, returns true and places the converted type (that might differ from
+/// ToType in its cv-qualifiers at some level) into ConvertedType.
+bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
+ QualType ToType, QualType &ConvertedType)
+{
+ const MemberPointerType *ToTypePtr = ToType->getAsMemberPointerType();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a member pointer (C++ 4.11p1)
+ if (From->isNullPointerConstant(Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Otherwise, both types have to be member pointers.
+ const MemberPointerType *FromTypePtr = FromType->getAsMemberPointerType();
+ if (!FromTypePtr)
+ return false;
+
+ // A pointer to member of B can be converted to a pointer to member of D,
+ // where D is derived from B (C++ 4.11p2).
+ QualType FromClass(FromTypePtr->getClass(), 0);
+ QualType ToClass(ToTypePtr->getClass(), 0);
+ // FIXME: What happens when these are dependent? Is this function even called?
+
+ if (IsDerivedFrom(ToClass, FromClass)) {
+ ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
+ ToClass.getTypePtr());
+ return true;
+ }
+
+ return false;
+}
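+
+// For example (a sketch): member pointers convert toward the derived class,
+// the opposite direction from ordinary pointer conversions:
+//
+//   struct Base { int m; }; struct Derived : Base {};
+//   int Base::*bm = &Base::m;
+//   int Derived::*dm = bm;   // pointer to member of Base -> of Derived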
+
+/// CheckMemberPointerConversion - Check the member pointer conversion from the
+/// expression From to the type ToType. This routine checks for ambiguous or
+/// virtual (FIXME: or inaccessible) base-to-derived member pointer conversions
+/// for which IsMemberPointerConversion has already returned true. It returns
+/// true and produces a diagnostic if there was an error, or returns false
+/// otherwise.
+bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType) {
+ QualType FromType = From->getType();
+ const MemberPointerType *FromPtrType = FromType->getAsMemberPointerType();
+ if (!FromPtrType)
+ return false;
+
+ const MemberPointerType *ToPtrType = ToType->getAsMemberPointerType();
+ assert(ToPtrType && "No member pointer cast has a target type "
+ "that is not a member pointer.");
+
+ QualType FromClass = QualType(FromPtrType->getClass(), 0);
+ QualType ToClass = QualType(ToPtrType->getClass(), 0);
+
+ // FIXME: What about dependent types?
+ assert(FromClass->isRecordType() && "Pointer into non-class.");
+ assert(ToClass->isRecordType() && "Pointer into non-class.");
+
+ BasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/false,
+ /*DetectVirtual=*/true);
+ bool DerivationOkay = IsDerivedFrom(ToClass, FromClass, Paths);
+ assert(DerivationOkay &&
+ "Should not have been called if derivation isn't OK.");
+ (void)DerivationOkay;
+
+ if (Paths.isAmbiguous(Context.getCanonicalType(FromClass).
+ getUnqualifiedType())) {
+ // Derivation is ambiguous. Redo the check to find the exact paths.
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = IsDerivedFrom(ToClass, FromClass, Paths);
+ assert(StillOkay && "Derivation changed due to quantum fluctuation.");
+ (void)StillOkay;
+
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+ Diag(From->getExprLoc(), diag::err_ambiguous_memptr_conv)
+ << 0 << FromClass << ToClass << PathDisplayStr << From->getSourceRange();
+ return true;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Diag(From->getExprLoc(), diag::err_memptr_conv_via_virtual)
+ << FromClass << ToClass << QualType(VBase, 0)
+ << From->getSourceRange();
+ return true;
+ }
+
+ return false;
+}
+
+/// IsQualificationConversion - Determines whether the conversion from
+/// an rvalue of type FromType to ToType is a qualification conversion
+/// (C++ 4.4).
+bool
+Sema::IsQualificationConversion(QualType FromType, QualType ToType)
+{
+ FromType = Context.getCanonicalType(FromType);
+ ToType = Context.getCanonicalType(ToType);
+
+ // If FromType and ToType are the same type, this is not a
+ // qualification conversion.
+ if (FromType == ToType)
+ return false;
+
+ // (C++ 4.4p4):
+ // A conversion can add cv-qualifiers at levels other than the first
+ // in multi-level pointers, subject to the following rules: [...]
+ bool PreviousToQualsIncludeConst = true;
+ bool UnwrappedAnyPointer = false;
+ while (UnwrapSimilarPointerTypes(FromType, ToType)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left to
+ // unwrap.
+ UnwrappedAnyPointer = true;
+
+ // -- for every j > 0, if const is in cv 1,j then const is in cv
+ // 2,j, and similarly for volatile.
+ if (!ToType.isAtLeastAsQualifiedAs(FromType))
+ return false;
+
+ // -- if the cv 1,j and cv 2,j are different, then const is in
+ // every cv for 0 < k < j.
+ if (FromType.getCVRQualifiers() != ToType.getCVRQualifiers()
+ && !PreviousToQualsIncludeConst)
+ return false;
+
+ // Keep track of whether all prior cv-qualifiers in the "to" type
+ // include const.
+ PreviousToQualsIncludeConst
+ = PreviousToQualsIncludeConst && ToType.isConstQualified();
+ }
+
+ // We are left with FromType and ToType being the pointee types
+ // after unwrapping the original FromType and ToType the same number
+  // of times. If we unwrapped any pointers, and if FromType and
+ // ToType have the same unqualified type (since we checked
+ // qualifiers above), then this is a qualification conversion.
+ return UnwrappedAnyPointer &&
+ FromType.getUnqualifiedType() == ToType.getUnqualifiedType();
+}
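+
+// For example (a sketch): 'char**' -> 'const char* const*' is a qualification
+// conversion, because the const added at the inner level is matched by const
+// at every enclosing level, whereas 'char**' -> 'const char**' is not one
+// (C++ 4.4p4).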
+
+/// Determines whether there is a user-defined conversion sequence
+/// (C++ [over.ics.user]) that converts expression From to the type
+/// ToType. If such a conversion exists, User will contain the
+/// user-defined conversion sequence that performs such a conversion
+/// and this routine will return true. Otherwise, this routine returns
+/// false and User is unspecified.
+///
+/// \param AllowConversionFunctions true if the conversion should
+/// consider conversion functions at all. If false, only constructors
+/// will be considered.
+///
+/// \param AllowExplicit true if the conversion should consider C++0x
+/// "explicit" conversion functions as well as non-explicit conversion
+/// functions (C++0x [class.conv.fct]p2).
+///
+/// \param ForceRValue true if the expression should be treated as an rvalue
+/// for overload resolution.
+bool Sema::IsUserDefinedConversion(Expr *From, QualType ToType,
+ UserDefinedConversionSequence& User,
+ bool AllowConversionFunctions,
+ bool AllowExplicit, bool ForceRValue)
+{
+ OverloadCandidateSet CandidateSet;
+ if (const RecordType *ToRecordType = ToType->getAsRecordType()) {
+ if (CXXRecordDecl *ToRecordDecl
+ = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+ // C++ [over.match.ctor]p1:
+ // When objects of class type are direct-initialized (8.5), or
+ // copy-initialized from an expression of the same or a
+ // derived class type (8.5), overload resolution selects the
+ // constructor. [...] For copy-initialization, the candidate
+ // functions are all the converting constructors (12.3.1) of
+ // that class. The argument list is the expression-list within
+ // the parentheses of the initializer.
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ToType).getUnqualifiedType());
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd)
+ = ToRecordDecl->lookup(Context, ConstructorName);
+ Con != ConEnd; ++Con) {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isConvertingConstructor())
+ AddOverloadCandidate(Constructor, &From, 1, CandidateSet,
+ /*SuppressUserConversions=*/true, ForceRValue);
+ }
+ }
+ }
+
+ if (!AllowConversionFunctions) {
+ // Don't allow any conversion functions to enter the overload set.
+ } else if (const RecordType *FromRecordType
+ = From->getType()->getAsRecordType()) {
+ if (CXXRecordDecl *FromRecordDecl
+ = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
+ // Add all of the conversion functions as candidates.
+ // FIXME: Look for conversions in base classes!
+ OverloadedFunctionDecl *Conversions
+ = FromRecordDecl->getConversionFunctions();
+ for (OverloadedFunctionDecl::function_iterator Func
+ = Conversions->function_begin();
+ Func != Conversions->function_end(); ++Func) {
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(*Func);
+ if (AllowExplicit || !Conv->isExplicit())
+ AddConversionCandidate(Conv, From, ToType, CandidateSet);
+ }
+ }
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ // Record the standard conversion we used and the conversion function.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(Best->Function)) {
+ // C++ [over.ics.user]p1:
+ // If the user-defined conversion is specified by a
+ // constructor (12.3.1), the initial standard conversion
+ // sequence converts the source type to the type required by
+ // the argument of the constructor.
+ //
+ // FIXME: What about ellipsis conversions?
+ QualType ThisType = Constructor->getThisType(Context);
+ User.Before = Best->Conversions[0].Standard;
+ User.ConversionFunction = Constructor;
+ User.After.setAsIdentityConversion();
+ User.After.FromTypePtr
+ = ThisType->getAsPointerType()->getPointeeType().getAsOpaquePtr();
+ User.After.ToTypePtr = ToType.getAsOpaquePtr();
+ return true;
+ } else if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(Best->Function)) {
+ // C++ [over.ics.user]p1:
+ //
+ // [...] If the user-defined conversion is specified by a
+ // conversion function (12.3.2), the initial standard
+ // conversion sequence converts the source type to the
+ // implicit object parameter of the conversion function.
+ User.Before = Best->Conversions[0].Standard;
+ User.ConversionFunction = Conversion;
+
+ // C++ [over.ics.user]p2:
+ // The second standard conversion sequence converts the
+ // result of the user-defined conversion to the target type
+ // for the sequence. Since an implicit conversion sequence
+ // is an initialization, the special rules for
+ // initialization by user-defined conversion apply when
+ // selecting the best user-defined conversion for a
+ // user-defined conversion sequence (see 13.3.3 and
+ // 13.3.3.1).
+ User.After = Best->FinalConversion;
+ return true;
+ } else {
+ assert(false && "Not a constructor or conversion function?");
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+ case OR_Deleted:
+ // No conversion here! We're done.
+ return false;
+
+ case OR_Ambiguous:
+ // FIXME: See C++ [over.best.ics]p10 for the handling of
+ // ambiguous conversion sequences.
+ return false;
+ }
+
+ return false;
+}
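+
+// For example (a sketch): given 'struct X { X(int); operator bool() const; };',
+// 'X x = 42;' is resolved through the converting constructor X(int), and
+// 'bool b = x;' through the conversion function operator bool(); both are
+// user-defined conversion sequences in the sense handled above.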
+
+/// CompareImplicitConversionSequences - Compare two implicit
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2).
+ImplicitConversionSequence::CompareKind
+Sema::CompareImplicitConversionSequences(const ImplicitConversionSequence& ICS1,
+ const ImplicitConversionSequence& ICS2)
+{
+ // (C++ 13.3.3.2p2): When comparing the basic forms of implicit
+ // conversion sequences (as defined in 13.3.3.1)
+ // -- a standard conversion sequence (13.3.3.1.1) is a better
+ // conversion sequence than a user-defined conversion sequence or
+ // an ellipsis conversion sequence, and
+ // -- a user-defined conversion sequence (13.3.3.1.2) is a better
+ // conversion sequence than an ellipsis conversion sequence
+ // (13.3.3.1.3).
+ //
+ if (ICS1.ConversionKind < ICS2.ConversionKind)
+ return ImplicitConversionSequence::Better;
+ else if (ICS2.ConversionKind < ICS1.ConversionKind)
+ return ImplicitConversionSequence::Worse;
+
+ // Two implicit conversion sequences of the same form are
+ // indistinguishable conversion sequences unless one of the
+  // following rules applies (C++ 13.3.3.2p3):
+ if (ICS1.ConversionKind == ImplicitConversionSequence::StandardConversion)
+ return CompareStandardConversionSequences(ICS1.Standard, ICS2.Standard);
+ else if (ICS1.ConversionKind ==
+ ImplicitConversionSequence::UserDefinedConversion) {
+ // User-defined conversion sequence U1 is a better conversion
+ // sequence than another user-defined conversion sequence U2 if
+ // they contain the same user-defined conversion function or
+ // constructor and if the second standard conversion sequence of
+ // U1 is better than the second standard conversion sequence of
+ // U2 (C++ 13.3.3.2p3).
+ if (ICS1.UserDefined.ConversionFunction ==
+ ICS2.UserDefined.ConversionFunction)
+ return CompareStandardConversionSequences(ICS1.UserDefined.After,
+ ICS2.UserDefined.After);
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// CompareStandardConversionSequences - Compare two standard
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2p3).
+ImplicitConversionSequence::CompareKind
+Sema::CompareStandardConversionSequences(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2)
+{
+ // Standard conversion sequence S1 is a better conversion sequence
+ // than standard conversion sequence S2 if (C++ 13.3.3.2p3):
+
+ // -- S1 is a proper subsequence of S2 (comparing the conversion
+ // sequences in the canonical form defined by 13.3.3.1.1,
+ // excluding any Lvalue Transformation; the identity conversion
+ // sequence is considered to be a subsequence of any
+ // non-identity conversion sequence) or, if not that,
+ if (SCS1.Second == SCS2.Second && SCS1.Third == SCS2.Third)
+ // Neither is a proper subsequence of the other. Do nothing.
+ ;
+ else if ((SCS1.Second == ICK_Identity && SCS1.Third == SCS2.Third) ||
+ (SCS1.Third == ICK_Identity && SCS1.Second == SCS2.Second) ||
+ (SCS1.Second == ICK_Identity &&
+ SCS1.Third == ICK_Identity))
+ // SCS1 is a proper subsequence of SCS2.
+ return ImplicitConversionSequence::Better;
+ else if ((SCS2.Second == ICK_Identity && SCS2.Third == SCS1.Third) ||
+ (SCS2.Third == ICK_Identity && SCS2.Second == SCS1.Second) ||
+ (SCS2.Second == ICK_Identity &&
+ SCS2.Third == ICK_Identity))
+ // SCS2 is a proper subsequence of SCS1.
+ return ImplicitConversionSequence::Worse;
+
+ // -- the rank of S1 is better than the rank of S2 (by the rules
+ // defined below), or, if not that,
+ ImplicitConversionRank Rank1 = SCS1.getRank();
+ ImplicitConversionRank Rank2 = SCS2.getRank();
+ if (Rank1 < Rank2)
+ return ImplicitConversionSequence::Better;
+ else if (Rank2 < Rank1)
+ return ImplicitConversionSequence::Worse;
+
+ // (C++ 13.3.3.2p4): Two conversion sequences with the same rank
+ // are indistinguishable unless one of the following rules
+ // applies:
+
+ // A conversion that is not a conversion of a pointer, or
+ // pointer to member, to bool is better than another conversion
+ // that is such a conversion.
+ if (SCS1.isPointerConversionToBool() != SCS2.isPointerConversionToBool())
+ return SCS2.isPointerConversionToBool()
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+
+ // C++ [over.ics.rank]p4b2:
+ //
+ // If class B is derived directly or indirectly from class A,
+ // conversion of B* to A* is better than conversion of B* to
+ // void*, and conversion of A* to void* is better than conversion
+ // of B* to void*.
+ bool SCS1ConvertsToVoid
+ = SCS1.isPointerConversionToVoidPointer(Context);
+ bool SCS2ConvertsToVoid
+ = SCS2.isPointerConversionToVoidPointer(Context);
+ if (SCS1ConvertsToVoid != SCS2ConvertsToVoid) {
+ // Exactly one of the conversion sequences is a conversion to
+ // a void pointer; it's the worse conversion.
+ return SCS2ConvertsToVoid ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ } else if (!SCS1ConvertsToVoid && !SCS2ConvertsToVoid) {
+ // Neither conversion sequence converts to a void pointer; compare
+ // their derived-to-base conversions.
+ if (ImplicitConversionSequence::CompareKind DerivedCK
+ = CompareDerivedToBaseConversions(SCS1, SCS2))
+ return DerivedCK;
+ } else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid) {
+ // Both conversion sequences are conversions to void
+ // pointers. Compare the source types to determine if there's an
+ // inheritance relationship in their sources.
+ QualType FromType1 = QualType::getFromOpaquePtr(SCS1.FromTypePtr);
+ QualType FromType2 = QualType::getFromOpaquePtr(SCS2.FromTypePtr);
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = Context.getArrayDecayedType(FromType2);
+
+ QualType FromPointee1
+ = FromType1->getAsPointerType()->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2
+ = FromType2->getAsPointerType()->getPointeeType().getUnqualifiedType();
+
+ if (IsDerivedFrom(FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+
+ // Objective-C++: If one interface is more specific than the
+ // other, it is the better one.
+ const ObjCInterfaceType* FromIface1 = FromPointee1->getAsObjCInterfaceType();
+ const ObjCInterfaceType* FromIface2 = FromPointee2->getAsObjCInterfaceType();
+    if (FromIface1 && FromIface2) {
+ if (Context.canAssignObjCInterfaces(FromIface2, FromIface1))
+ return ImplicitConversionSequence::Better;
+ else if (Context.canAssignObjCInterfaces(FromIface1, FromIface2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // Compare based on qualification conversions (C++ 13.3.3.2p3,
+ // bullet 3).
+ if (ImplicitConversionSequence::CompareKind QualCK
+ = CompareQualificationConversions(SCS1, SCS2))
+ return QualCK;
+
+ if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) {
+ // C++0x [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
+ // implicit object parameter of a non-static member function declared
+ // without a ref-qualifier, and S1 binds an rvalue reference to an
+ // rvalue and S2 binds an lvalue reference.
+ // FIXME: We don't know if we're dealing with the implicit object parameter,
+ // or if the member function in this case has a ref qualifier.
+ // (Of course, we don't have ref qualifiers yet.)
+ if (SCS1.RRefBinding != SCS2.RRefBinding)
+ return SCS1.RRefBinding ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+
+ // C++ [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3), and the types to
+ // which the references refer are the same type except for
+ // top-level cv-qualifiers, and the type to which the reference
+ // initialized by S2 refers is more cv-qualified than the type
+ // to which the reference initialized by S1 refers.
+ QualType T1 = QualType::getFromOpaquePtr(SCS1.ToTypePtr);
+ QualType T2 = QualType::getFromOpaquePtr(SCS2.ToTypePtr);
+ T1 = Context.getCanonicalType(T1);
+ T2 = Context.getCanonicalType(T2);
+ if (T1.getUnqualifiedType() == T2.getUnqualifiedType()) {
+ if (T2.isMoreQualifiedThan(T1))
+ return ImplicitConversionSequence::Better;
+ else if (T1.isMoreQualifiedThan(T2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
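+
+// For example (a sketch): given 'void f(int); void f(long);', the call 'f('a')'
+// selects f(int), because char -> int is a promotion and outranks the
+// char -> long integral conversion.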
+
+/// CompareQualificationConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
+ImplicitConversionSequence::CompareKind
+Sema::CompareQualificationConversions(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2)
+{
+ // C++ 13.3.3.2p3:
+ // -- S1 and S2 differ only in their qualification conversion and
+ // yield similar types T1 and T2 (C++ 4.4), respectively, and the
+ // cv-qualification signature of type T1 is a proper subset of
+ // the cv-qualification signature of type T2, and S1 is not the
+ // deprecated string literal array-to-pointer conversion (4.2).
+ if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
+ SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // FIXME: the example in the standard doesn't use a qualification
+ // conversion (!)
+ QualType T1 = QualType::getFromOpaquePtr(SCS1.ToTypePtr);
+ QualType T2 = QualType::getFromOpaquePtr(SCS2.ToTypePtr);
+ T1 = Context.getCanonicalType(T1);
+ T2 = Context.getCanonicalType(T2);
+
+  // If the types are the same, we won't learn anything by unwrapping
+ // them.
+ if (T1.getUnqualifiedType() == T2.getUnqualifiedType())
+ return ImplicitConversionSequence::Indistinguishable;
+
+ ImplicitConversionSequence::CompareKind Result
+ = ImplicitConversionSequence::Indistinguishable;
+ while (UnwrapSimilarPointerTypes(T1, T2)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left
+ // to unwrap. This essentially mimics what
+ // IsQualificationConversion does, but here we're checking for a
+ // strict subset of qualifiers.
+ if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
+ // The qualifiers are the same, so this doesn't tell us anything
+ // about how the sequences rank.
+ ;
+ else if (T2.isMoreQualifiedThan(T1)) {
+ // T1 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Worse)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Better;
+ } else if (T1.isMoreQualifiedThan(T2)) {
+ // T2 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Better)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Worse;
+ } else {
+ // Qualifiers are disjoint.
+ return ImplicitConversionSequence::Indistinguishable;
+ }
+
+ // If the types after this point are equivalent, we're done.
+ if (T1.getUnqualifiedType() == T2.getUnqualifiedType())
+ break;
+ }
+
+ // Check that the winning standard conversion sequence isn't using
+ // the deprecated string literal array to pointer conversion.
+ switch (Result) {
+ case ImplicitConversionSequence::Better:
+ if (SCS1.Deprecated)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ if (SCS2.Deprecated)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+ }
+
+ return Result;
+}
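+
+// For example (a sketch): given 'void g(const int*); void g(const volatile int*);'
+// and 'int i;', the call 'g(&i)' selects the 'const int*' overload, whose
+// qualification conversion adds a proper subset of the other's qualifiers.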
+
+/// CompareDerivedToBaseConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// various kinds of derived-to-base conversions (C++
+/// [over.ics.rank]p4b3). As part of these checks, we also look at
+/// conversions between Objective-C interface types.
+ImplicitConversionSequence::CompareKind
+Sema::CompareDerivedToBaseConversions(const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ QualType FromType1 = QualType::getFromOpaquePtr(SCS1.FromTypePtr);
+ QualType ToType1 = QualType::getFromOpaquePtr(SCS1.ToTypePtr);
+ QualType FromType2 = QualType::getFromOpaquePtr(SCS2.FromTypePtr);
+ QualType ToType2 = QualType::getFromOpaquePtr(SCS2.ToTypePtr);
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = Context.getArrayDecayedType(FromType2);
+
+ // Canonicalize all of the types.
+ FromType1 = Context.getCanonicalType(FromType1);
+ ToType1 = Context.getCanonicalType(ToType1);
+ FromType2 = Context.getCanonicalType(FromType2);
+ ToType2 = Context.getCanonicalType(ToType2);
+
+ // C++ [over.ics.rank]p4b3:
+ //
+ // If class B is derived directly or indirectly from class A and
+ // class C is derived directly or indirectly from B,
+ //
+ // For Objective-C, we let A, B, and C also be Objective-C
+ // interfaces.
+
+ // Compare based on pointer conversions.
+ if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion &&
+ /*FIXME: Remove if Objective-C id conversions get their own rank*/
+ FromType1->isPointerType() && FromType2->isPointerType() &&
+ ToType1->isPointerType() && ToType2->isPointerType()) {
+ QualType FromPointee1
+ = FromType1->getAsPointerType()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee1
+ = ToType1->getAsPointerType()->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2
+ = FromType2->getAsPointerType()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee2
+ = ToType2->getAsPointerType()->getPointeeType().getUnqualifiedType();
+
+ const ObjCInterfaceType* FromIface1 = FromPointee1->getAsObjCInterfaceType();
+ const ObjCInterfaceType* FromIface2 = FromPointee2->getAsObjCInterfaceType();
+ const ObjCInterfaceType* ToIface1 = ToPointee1->getAsObjCInterfaceType();
+ const ObjCInterfaceType* ToIface2 = ToPointee2->getAsObjCInterfaceType();
+
+ // -- conversion of C* to B* is better than conversion of C* to A*,
+ if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
+ if (IsDerivedFrom(ToPointee1, ToPointee2))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(ToPointee2, ToPointee1))
+ return ImplicitConversionSequence::Worse;
+
+ if (ToIface1 && ToIface2) {
+ if (Context.canAssignObjCInterfaces(ToIface2, ToIface1))
+ return ImplicitConversionSequence::Better;
+ else if (Context.canAssignObjCInterfaces(ToIface1, ToIface2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // -- conversion of B* to A* is better than conversion of C* to A*,
+ if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
+ if (IsDerivedFrom(FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+
+ if (FromIface1 && FromIface2) {
+ if (Context.canAssignObjCInterfaces(FromIface1, FromIface2))
+ return ImplicitConversionSequence::Better;
+ else if (Context.canAssignObjCInterfaces(FromIface2, FromIface1))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+ }
+
+ // Compare based on reference bindings.
+ if (SCS1.ReferenceBinding && SCS2.ReferenceBinding &&
+ SCS1.Second == ICK_Derived_To_Base) {
+ // -- binding of an expression of type C to a reference of type
+ // B& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (FromType1.getUnqualifiedType() == FromType2.getUnqualifiedType() &&
+ ToType1.getUnqualifiedType() != ToType2.getUnqualifiedType()) {
+ if (IsDerivedFrom(ToType1, ToType2))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(ToType2, ToType1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- binding of an expression of type B to a reference of type
+ // A& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (FromType1.getUnqualifiedType() != FromType2.getUnqualifiedType() &&
+ ToType1.getUnqualifiedType() == ToType2.getUnqualifiedType()) {
+ if (IsDerivedFrom(FromType2, FromType1))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(FromType1, FromType2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // FIXME: conversion of A::* to B::* is better than conversion of
+ // A::* to C::*,
+
+ // FIXME: conversion of B::* to C::* is better than conversion of
+ // A::* to C::*, and
+
+ if (SCS1.CopyConstructor && SCS2.CopyConstructor &&
+ SCS1.Second == ICK_Derived_To_Base) {
+ // -- conversion of C to B is better than conversion of C to A,
+ if (FromType1.getUnqualifiedType() == FromType2.getUnqualifiedType() &&
+ ToType1.getUnqualifiedType() != ToType2.getUnqualifiedType()) {
+ if (IsDerivedFrom(ToType1, ToType2))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(ToType2, ToType1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- conversion of B to A is better than conversion of C to A.
+ if (FromType1.getUnqualifiedType() != FromType2.getUnqualifiedType() &&
+ ToType1.getUnqualifiedType() == ToType2.getUnqualifiedType()) {
+ if (IsDerivedFrom(FromType2, FromType1))
+ return ImplicitConversionSequence::Better;
+ else if (IsDerivedFrom(FromType1, FromType2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
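+
+// For example (a sketch): with 'struct A {}; struct B : A {}; struct C : B {};'
+// and 'void h(A*); void h(B*);', calling h with a 'C*' argument selects h(B*),
+// since conversion of C* to B* is better than conversion of C* to A*.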
+
+/// TryCopyInitialization - Try to copy-initialize a value of type
+/// ToType from the expression From. Return the implicit conversion
+/// sequence required to pass this argument, which may be a bad
+/// conversion sequence (meaning that the argument cannot be passed to
+/// a parameter of this type). If @p SuppressUserConversions, then we
+/// do not permit any user-defined conversion sequences. If @p ForceRValue,
+/// then we treat @p From as an rvalue, even if it is an lvalue.
+ImplicitConversionSequence
+Sema::TryCopyInitialization(Expr *From, QualType ToType,
+ bool SuppressUserConversions, bool ForceRValue) {
+ if (ToType->isReferenceType()) {
+ ImplicitConversionSequence ICS;
+ CheckReferenceInit(From, ToType, &ICS, SuppressUserConversions,
+ /*AllowExplicit=*/false, ForceRValue);
+ return ICS;
+ } else {
+ return TryImplicitConversion(From, ToType, SuppressUserConversions,
+ ForceRValue);
+ }
+}
+
+/// PerformCopyInitialization - Copy-initialize an object of type @p ToType with
+/// the expression @p From. Returns true (and emits a diagnostic) if there was
+/// an error, returns false if the initialization succeeded. Elidable should
+/// be true when the copy may be elided (C++ 12.8p15). Overload resolution works
+/// differently in C++0x for this case.
+bool Sema::PerformCopyInitialization(Expr *&From, QualType ToType,
+ const char* Flavor, bool Elidable) {
+ if (!getLangOptions().CPlusPlus) {
+ // In C, argument passing is the same as performing an assignment.
+ QualType FromType = From->getType();
+
+ AssignConvertType ConvTy =
+ CheckSingleAssignmentConstraints(ToType, From);
+ if (ConvTy != Compatible &&
+ CheckTransparentUnionArgumentConstraints(ToType, From) == Compatible)
+ ConvTy = Compatible;
+
+ return DiagnoseAssignmentResult(ConvTy, From->getLocStart(), ToType,
+ FromType, From, Flavor);
+ }
+
+ if (ToType->isReferenceType())
+ return CheckReferenceInit(From, ToType);
+
+ if (!PerformImplicitConversion(From, ToType, Flavor,
+ /*AllowExplicit=*/false, Elidable))
+ return false;
+
+ return Diag(From->getSourceRange().getBegin(),
+ diag::err_typecheck_convert_incompatible)
+ << ToType << From->getType() << Flavor << From->getSourceRange();
+}
+
+/// TryObjectArgumentInitialization - Try to initialize the object
+/// parameter of the given member function (@c Method) from the
+/// expression @p From.
+ImplicitConversionSequence
+Sema::TryObjectArgumentInitialization(Expr *From, CXXMethodDecl *Method) {
+ QualType ClassType = Context.getTypeDeclType(Method->getParent());
+ unsigned MethodQuals = Method->getTypeQualifiers();
+ QualType ImplicitParamType = ClassType.getQualifiedType(MethodQuals);
+
+ // Set up the conversion sequence as a "bad" conversion, to allow us
+ // to exit early.
+ ImplicitConversionSequence ICS;
+ ICS.Standard.setAsIdentityConversion();
+ ICS.ConversionKind = ImplicitConversionSequence::BadConversion;
+
+ // We need to have an object of class type.
+ QualType FromType = From->getType();
+ if (const PointerType *PT = FromType->getAsPointerType())
+ FromType = PT->getPointeeType();
+
+ assert(FromType->isRecordType());
+
+  // The implicit object parameter has the type "reference to cv X",
+  // where X is the class of which the function is a member
+  // (C++ [over.match.funcs]p4). However, when finding an implicit
+  // conversion sequence for the argument, we are not allowed to
+  // create temporaries or perform user-defined conversions
+  // (C++ [over.match.funcs]p5). We perform a simplified version of
+  // reference binding here that allows class rvalues to bind to
+  // non-constant references.
+
+ // First check the qualifiers. We don't care about lvalue-vs-rvalue
+ // with the implicit object parameter (C++ [over.match.funcs]p5).
+ QualType FromTypeCanon = Context.getCanonicalType(FromType);
+ if (ImplicitParamType.getCVRQualifiers() != FromType.getCVRQualifiers() &&
+ !ImplicitParamType.isAtLeastAsQualifiedAs(FromType))
+ return ICS;
+
+ // Check that we have either the same type or a derived type. It
+ // affects the conversion rank.
+ QualType ClassTypeCanon = Context.getCanonicalType(ClassType);
+ if (ClassTypeCanon == FromTypeCanon.getUnqualifiedType())
+ ICS.Standard.Second = ICK_Identity;
+ else if (IsDerivedFrom(FromType, ClassType))
+ ICS.Standard.Second = ICK_Derived_To_Base;
+ else
+ return ICS;
+
+ // Success. Mark this as a reference binding.
+ ICS.ConversionKind = ImplicitConversionSequence::StandardConversion;
+ ICS.Standard.FromTypePtr = FromType.getAsOpaquePtr();
+ ICS.Standard.ToTypePtr = ImplicitParamType.getAsOpaquePtr();
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.RRefBinding = false;
+ return ICS;
+}
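+
+// For example (a sketch): for 'struct S { void f(); void f() const; };' and a
+// 'const S cs;', the call 'cs.f()' binds the implicit object parameter of the
+// const overload to 'cs'; the non-const overload is not viable because its
+// implicit object parameter is less qualified than the argument.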
+
+/// PerformObjectArgumentInitialization - Perform initialization of
+/// the implicit object parameter for the given Method with the given
+/// expression.
+bool
+Sema::PerformObjectArgumentInitialization(Expr *&From, CXXMethodDecl *Method) {
+ QualType FromRecordType, DestType;
+ QualType ImplicitParamRecordType =
+ Method->getThisType(Context)->getAsPointerType()->getPointeeType();
+
+ if (const PointerType *PT = From->getType()->getAsPointerType()) {
+ FromRecordType = PT->getPointeeType();
+ DestType = Method->getThisType(Context);
+ } else {
+ FromRecordType = From->getType();
+ DestType = ImplicitParamRecordType;
+ }
+
+ ImplicitConversionSequence ICS
+ = TryObjectArgumentInitialization(From, Method);
+ if (ICS.ConversionKind == ImplicitConversionSequence::BadConversion)
+ return Diag(From->getSourceRange().getBegin(),
+ diag::err_implicit_object_parameter_init)
+ << ImplicitParamRecordType << FromRecordType << From->getSourceRange();
+
+ if (ICS.Standard.Second == ICK_Derived_To_Base &&
+ CheckDerivedToBaseConversion(FromRecordType,
+ ImplicitParamRecordType,
+ From->getSourceRange().getBegin(),
+ From->getSourceRange()))
+ return true;
+
+ ImpCastExprToType(From, DestType, /*isLvalue=*/true);
+ return false;
+}
+
+/// TryContextuallyConvertToBool - Attempt to contextually convert the
+/// expression From to bool (C++0x [conv]p3).
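+///
+/// For illustration (hypothetical declarations):
+/// @code
+/// struct S { operator bool() const; };
+/// S s;
+/// if (s) { } // 's' is contextually converted to bool via S::operator bool
+/// @endcode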
+ImplicitConversionSequence Sema::TryContextuallyConvertToBool(Expr *From) {
+ return TryImplicitConversion(From, Context.BoolTy, false, true);
+}
+
+/// PerformContextuallyConvertToBool - Perform a contextual conversion
+/// of the expression From to bool (C++0x [conv]p3).
+bool Sema::PerformContextuallyConvertToBool(Expr *&From) {
+ ImplicitConversionSequence ICS = TryContextuallyConvertToBool(From);
+ if (!PerformImplicitConversion(From, Context.BoolTy, ICS, "converting"))
+ return false;
+
+ return Diag(From->getSourceRange().getBegin(),
+ diag::err_typecheck_bool_condition)
+ << From->getType() << From->getSourceRange();
+}
+
+/// AddOverloadCandidate - Adds the given function to the set of
+/// candidate functions, using the given function call arguments. If
+/// @p SuppressUserConversions, then don't allow user-defined
+/// conversions via constructors or conversion operators.
+/// If @p ForceRValue, treat all arguments as rvalues. This is a slightly
+/// hacky way to implement the overloading rules for elidable copy
+/// initialization in C++0x (C++0x 12.8p15).
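+///
+/// An illustrative (hypothetical) example: given
+/// @code
+/// void g(int);
+/// void g(double);
+/// g(1.5f); // both declarations of g are added as candidates for this call
+/// @endcode
+/// each visible declaration of @c g becomes one entry in @p CandidateSet.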
+void
+Sema::AddOverloadCandidate(FunctionDecl *Function,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions,
+ bool ForceRValue)
+{
+ const FunctionProtoType* Proto
+ = dyn_cast<FunctionProtoType>(Function->getType()->getAsFunctionType());
+ assert(Proto && "Functions without a prototype cannot be overloaded");
+ assert(!isa<CXXConversionDecl>(Function) &&
+ "Use AddConversionCandidate for conversion functions");
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ if (!isa<CXXConstructorDecl>(Method)) {
+ // If we get here, it's because we're calling a member function
+ // that is named without a member access expression (e.g.,
+ // "this->f") that was either written explicitly or created
+ // implicitly. This can happen with a qualified call to a member
+ // function, e.g., X::f(). We use a NULL object as the implied
+ // object argument (C++ [over.call.func]p3).
+ AddMethodCandidate(Method, 0, Args, NumArgs, CandidateSet,
+ SuppressUserConversions, ForceRValue);
+ return;
+ }
+ // We treat a constructor like a non-member function, since its object
+ // argument doesn't participate in overload resolution.
+ }
+
+ // Add this candidate
+ CandidateSet.push_back(OverloadCandidate());
+ OverloadCandidate& Candidate = CandidateSet.back();
+ Candidate.Function = Function;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (NumArgs > NumArgsInProto && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Function->getMinRequiredArguments();
+ if (NumArgs < MinRequiredArgs) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ Candidate.Conversions.resize(NumArgs);
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(Args[ArgIdx], ParamType,
+ SuppressUserConversions, ForceRValue);
+ if (Candidate.Conversions[ArgIdx].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx].ConversionKind
+ = ImplicitConversionSequence::EllipsisConversion;
+ }
+ }
+}
+
+/// \brief Add all of the function declarations in the given function set to
+/// the overload candidate set.
+void Sema::AddFunctionCandidates(const FunctionSet &Functions,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ for (FunctionSet::const_iterator F = Functions.begin(),
+ FEnd = Functions.end();
+ F != FEnd; ++F)
+ AddOverloadCandidate(*F, Args, NumArgs, CandidateSet,
+ SuppressUserConversions);
+}
+
+/// AddMethodCandidate - Adds the given C++ member function to the set
+/// of candidate functions, using the given function call arguments
+/// and the object argument (@c Object). For example, in a call
+/// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain
+/// both @c a1 and @c a2. If @p SuppressUserConversions, then don't
+/// allow user-defined conversions via constructors or conversion
+/// operators. If @p ForceRValue, treat all arguments as rvalues. This is
+/// a slightly hacky way to implement the overloading rules for elidable copy
+/// initialization in C++0x (C++0x 12.8p15).
+void
+Sema::AddMethodCandidate(CXXMethodDecl *Method, Expr *Object,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions, bool ForceRValue)
+{
+ const FunctionProtoType* Proto
+ = dyn_cast<FunctionProtoType>(Method->getType()->getAsFunctionType());
+ assert(Proto && "Methods without a prototype cannot be overloaded");
+ assert(!isa<CXXConversionDecl>(Method) &&
+ "Use AddConversionCandidate for conversion functions");
+ assert(!isa<CXXConstructorDecl>(Method) &&
+ "Use AddOverloadCandidate for constructors");
+
+ // Add this candidate
+ CandidateSet.push_back(OverloadCandidate());
+ OverloadCandidate& Candidate = CandidateSet.back();
+ Candidate.Function = Method;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (NumArgs > NumArgsInProto && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Method->getMinRequiredArguments();
+ if (NumArgs < MinRequiredArgs) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ return;
+ }
+
+ Candidate.Viable = true;
+ Candidate.Conversions.resize(NumArgs + 1);
+
+ if (Method->isStatic() || !Object)
+ // The implicit object argument is ignored.
+ Candidate.IgnoreObjectArgument = true;
+ else {
+ // Determine the implicit conversion sequence for the object
+ // parameter.
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(Object, Method);
+ if (Candidate.Conversions[0].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ return;
+ }
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(Args[ArgIdx], ParamType,
+ SuppressUserConversions, ForceRValue);
+ if (Candidate.Conversions[ArgIdx + 1].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].ConversionKind
+ = ImplicitConversionSequence::EllipsisConversion;
+ }
+ }
+}
+
+/// AddConversionCandidate - Add a C++ conversion function as a
+/// candidate in the candidate set (C++ [over.match.conv],
+/// C++ [over.match.copy]). From is the expression we're converting from,
+/// and ToType is the type that we're eventually trying to convert to
+/// (which may or may not be the same type as the type that the
+/// conversion function produces).
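+///
+/// An illustrative (hypothetical) example:
+/// @code
+/// struct X { operator int() const; };
+/// long n = X(); // candidate: X::operator int(), followed by int -> long
+/// @endcode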
+void
+Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet& CandidateSet) {
+ // Add this candidate
+ CandidateSet.push_back(OverloadCandidate());
+ OverloadCandidate& Candidate = CandidateSet.back();
+ Candidate.Function = Conversion;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.FinalConversion.setAsIdentityConversion();
+ Candidate.FinalConversion.FromTypePtr
+ = Conversion->getConversionType().getAsOpaquePtr();
+ Candidate.FinalConversion.ToTypePtr = ToType.getAsOpaquePtr();
+
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ Candidate.Viable = true;
+ Candidate.Conversions.resize(1);
+ Candidate.Conversions[0] = TryObjectArgumentInitialization(From, Conversion);
+
+ if (Candidate.Conversions[0].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ return;
+ }
+
+ // To determine what the conversion from the result of calling the
+ // conversion function to the type we're eventually trying to
+ // convert to (ToType), we need to synthesize a call to the
+ // conversion function and attempt copy initialization from it. This
+ // makes sure that we get the right semantics with respect to
+ // lvalues/rvalues and the type. Fortunately, we can allocate this
+ // call on the stack and we don't need its arguments to be
+ // well-formed.
+ DeclRefExpr ConversionRef(Conversion, Conversion->getType(),
+ SourceLocation());
+ ImplicitCastExpr ConversionFn(Context.getPointerType(Conversion->getType()),
+ &ConversionRef, false);
+
+ // Note that it is safe to allocate CallExpr on the stack here because
+ // there are 0 arguments (i.e., nothing is allocated using ASTContext's
+ // allocator).
+ CallExpr Call(Context, &ConversionFn, 0, 0,
+ Conversion->getConversionType().getNonReferenceType(),
+ SourceLocation());
+ ImplicitConversionSequence ICS = TryCopyInitialization(&Call, ToType, true);
+ switch (ICS.ConversionKind) {
+ case ImplicitConversionSequence::StandardConversion:
+ Candidate.FinalConversion = ICS.Standard;
+ break;
+
+ case ImplicitConversionSequence::BadConversion:
+ Candidate.Viable = false;
+ break;
+
+ default:
+ assert(false &&
+ "Can only end up with a standard conversion sequence or failure");
+ }
+}
+
+/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
+/// converts the given @c Object to a function pointer via the
+/// conversion function @c Conversion, and then attempts to call it
+/// with the given arguments (C++ [over.call.object]p2-4). Proto is
+/// the type of function that we'll eventually be calling.
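+///
+/// An illustrative (hypothetical) example of a surrogate call:
+/// @code
+/// typedef void (*FnPtr)(int);
+/// struct X { operator FnPtr() const; };
+/// X x;
+/// x(0); // surrogate: convert 'x' to FnPtr, then call the result with 0
+/// @endcode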
+void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
+ const FunctionProtoType *Proto,
+ Expr *Object, Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet) {
+ CandidateSet.push_back(OverloadCandidate());
+ OverloadCandidate& Candidate = CandidateSet.back();
+ Candidate.Function = 0;
+ Candidate.Surrogate = Conversion;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = true;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.Conversions.resize(NumArgs + 1);
+
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ ImplicitConversionSequence ObjectInit
+ = TryObjectArgumentInitialization(Object, Conversion);
+ if (ObjectInit.ConversionKind == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ return;
+ }
+
+ // The first conversion is actually a user-defined conversion whose
+ // first conversion is ObjectInit's standard conversion (which is
+ // effectively a reference binding). Record it as such.
+ Candidate.Conversions[0].ConversionKind
+ = ImplicitConversionSequence::UserDefinedConversion;
+ Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard;
+ Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion;
+ Candidate.Conversions[0].UserDefined.After
+ = Candidate.Conversions[0].UserDefined.Before;
+ Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
+
+ // Check the arguments against the function prototype.
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (NumArgs > NumArgsInProto && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ return;
+ }
+
+ // Function types don't have any default arguments, so just check if
+ // we have enough arguments.
+ if (NumArgs < NumArgsInProto) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(Args[ArgIdx], ParamType,
+ /*SuppressUserConversions=*/false);
+ if (Candidate.Conversions[ArgIdx + 1].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].ConversionKind
+ = ImplicitConversionSequence::EllipsisConversion;
+ }
+ }
+}
+
+// FIXME: This will eventually be removed, once we've migrated all of the
+// operator overloading logic over to the scheme used by binary operators, which
+// works for template instantiation.
+void Sema::AddOperatorCandidates(OverloadedOperatorKind Op, Scope *S,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange) {
+
+ FunctionSet Functions;
+
+ QualType T1 = Args[0]->getType();
+ QualType T2;
+ if (NumArgs > 1)
+ T2 = Args[1]->getType();
+
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ if (S)
+ LookupOverloadedOperatorName(Op, S, T1, T2, Functions);
+ ArgumentDependentLookup(OpName, Args, NumArgs, Functions);
+ AddFunctionCandidates(Functions, Args, NumArgs, CandidateSet);
+ AddMemberOperatorCandidates(Op, OpLoc, Args, NumArgs, CandidateSet, OpRange);
+ AddBuiltinOperatorCandidates(Op, Args, NumArgs, CandidateSet);
+}
+
+/// \brief Add overload candidates for overloaded operators that are
+/// member functions.
+///
+/// Add the overloaded operator candidates that are member functions
+/// for the operator Op that was used in an operator expression such
+/// as "x Op y". , Args/NumArgs provides the operator arguments, and
+/// CandidateSet will store the added overload candidates. (C++
+/// [over.match.oper]).
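+///
+/// For illustration (hypothetical declarations):
+/// @code
+/// struct X { X operator+(int); };
+/// X x;
+/// x + 1; // member candidate X::operator+(int), found by lookup of X::operator+
+/// @endcode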
+void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange) {
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // C++ [over.match.oper]p3:
+ // For a unary operator @ with an operand of a type whose
+ // cv-unqualified version is T1, and for a binary operator @ with
+ // a left operand of a type whose cv-unqualified version is T1 and
+ // a right operand of a type whose cv-unqualified version is T2,
+ // three sets of candidate functions, designated member
+ // candidates, non-member candidates and built-in candidates, are
+ // constructed as follows:
+ QualType T1 = Args[0]->getType();
+ QualType T2;
+ if (NumArgs > 1)
+ T2 = Args[1]->getType();
+
+ // -- If T1 is a class type, the set of member candidates is the
+ // result of the qualified lookup of T1::operator@
+ // (13.3.1.1.1); otherwise, the set of member candidates is
+ // empty.
+ // FIXME: Lookup in base classes, too!
+ if (const RecordType *T1Rec = T1->getAsRecordType()) {
+ DeclContext::lookup_const_iterator Oper, OperEnd;
+ for (llvm::tie(Oper, OperEnd) = T1Rec->getDecl()->lookup(Context, OpName);
+ Oper != OperEnd; ++Oper)
+ AddMethodCandidate(cast<CXXMethodDecl>(*Oper), Args[0],
+ Args+1, NumArgs - 1, CandidateSet,
+ /*SuppressUserConversions=*/false);
+ }
+}
+
+/// AddBuiltinCandidate - Add a candidate for a built-in
+/// operator. ResultTy and ParamTys are the result and parameter types
+/// of the built-in candidate, respectively. Args and NumArgs are the
+/// arguments being passed to the candidate. IsAssignmentOperator
+/// should be true when this built-in candidate is an assignment
+/// operator. NumContextualBoolArguments is the number of arguments
+/// (at the beginning of the argument list) that will be contextually
+/// converted to bool.
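+///
+/// For example (illustrative only), for 'i *= 2' with 'int i', the built-in
+/// candidate 'int& operator*=(int&, int)' from C++ [over.built]p18 would be
+/// added through this routine, with ResultTy and ParamTys describing that
+/// signature.
+/// @code
+/// int i = 3;
+/// i *= 2; // built-in candidate: int& operator*=(int&, int)
+/// @endcode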
+void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool IsAssignmentOperator,
+ unsigned NumContextualBoolArguments) {
+ // Add this candidate
+ CandidateSet.push_back(OverloadCandidate());
+ OverloadCandidate& Candidate = CandidateSet.back();
+ Candidate.Function = 0;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.BuiltinTypes.ResultTy = ResultTy;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ Candidate.BuiltinTypes.ParamTypes[ArgIdx] = ParamTys[ArgIdx];
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ Candidate.Viable = true;
+ Candidate.Conversions.resize(NumArgs);
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ // C++ [over.match.oper]p4:
+ // For the built-in assignment operators, conversions of the
+ // left operand are restricted as follows:
+ // -- no temporaries are introduced to hold the left operand, and
+ // -- no user-defined conversions are applied to the left
+ // operand to achieve a type match with the left-most
+ // parameter of a built-in candidate.
+ //
+ // We block these conversions by turning off user-defined
+ // conversions, since that is the only way that initialization of
+ // a reference to a non-class type can occur from something that
+ // is not of the same type.
+ if (ArgIdx < NumContextualBoolArguments) {
+ assert(ParamTys[ArgIdx] == Context.BoolTy &&
+ "Contextual conversion to bool requires bool type");
+ Candidate.Conversions[ArgIdx] = TryContextuallyConvertToBool(Args[ArgIdx]);
+ } else {
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(Args[ArgIdx], ParamTys[ArgIdx],
+ ArgIdx == 0 && IsAssignmentOperator);
+ }
+ if (Candidate.Conversions[ArgIdx].ConversionKind
+ == ImplicitConversionSequence::BadConversion) {
+ Candidate.Viable = false;
+ break;
+ }
+ }
+}
+
+/// BuiltinCandidateTypeSet - A set of types that will be used for the
+/// candidate operator functions for built-in operators (C++
+/// [over.built]). The types are separated into pointer types and
+/// enumeration types.
+class BuiltinCandidateTypeSet {
+ /// TypeSet - A set of types.
+ typedef llvm::SmallPtrSet<QualType, 8> TypeSet;
+
+ /// PointerTypes - The set of pointer types that will be used in the
+ /// built-in candidates.
+ TypeSet PointerTypes;
+
+ /// MemberPointerTypes - The set of member pointer types that will be
+ /// used in the built-in candidates.
+ TypeSet MemberPointerTypes;
+
+ /// EnumerationTypes - The set of enumeration types that will be
+ /// used in the built-in candidates.
+ TypeSet EnumerationTypes;
+
+ /// Context - The AST context in which we will build the type sets.
+ ASTContext &Context;
+
+ bool AddPointerWithMoreQualifiedTypeVariants(QualType Ty);
+ bool AddMemberPointerWithMoreQualifiedTypeVariants(QualType Ty);
+
+public:
+ /// iterator - Iterates through the types that are part of the set.
+ typedef TypeSet::iterator iterator;
+
+ BuiltinCandidateTypeSet(ASTContext &Context) : Context(Context) { }
+
+ void AddTypesConvertedFrom(QualType Ty, bool AllowUserConversions,
+ bool AllowExplicitConversions);
+
+ /// pointer_begin - First pointer type found;
+ iterator pointer_begin() { return PointerTypes.begin(); }
+
+ /// pointer_end - Past the last pointer type found;
+ iterator pointer_end() { return PointerTypes.end(); }
+
+ /// member_pointer_begin - First member pointer type found;
+ iterator member_pointer_begin() { return MemberPointerTypes.begin(); }
+
+ /// member_pointer_end - Past the last member pointer type found;
+ iterator member_pointer_end() { return MemberPointerTypes.end(); }
+
+ /// enumeration_begin - First enumeration type found;
+ iterator enumeration_begin() { return EnumerationTypes.begin(); }
+
+ /// enumeration_end - Past the last enumeration type found;
+ iterator enumeration_end() { return EnumerationTypes.end(); }
+};
+
+/// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to
+/// the set of pointer types along with any more-qualified variants of
+/// that type. For example, if @p Ty is "int const *", this routine
+/// will add "int const *", "int const volatile *", "int const
+/// restrict *", and "int const volatile restrict *" to the set of
+/// pointer types. Returns true if the add of @p Ty itself succeeded,
+/// false otherwise.
+bool
+BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty) {
+ // Insert this type.
+ if (!PointerTypes.insert(Ty))
+ return false;
+
+ if (const PointerType *PointerTy = Ty->getAsPointerType()) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ // FIXME: Optimize this so that we don't keep trying to add the same types.
+
+ // FIXME: Do we have to add CVR qualifiers at *all* levels to deal with all
+ // pointer conversions that don't cast away constness?
+ if (!PointeeTy.isConstQualified())
+ AddPointerWithMoreQualifiedTypeVariants
+ (Context.getPointerType(PointeeTy.withConst()));
+ if (!PointeeTy.isVolatileQualified())
+ AddPointerWithMoreQualifiedTypeVariants
+ (Context.getPointerType(PointeeTy.withVolatile()));
+ if (!PointeeTy.isRestrictQualified())
+ AddPointerWithMoreQualifiedTypeVariants
+ (Context.getPointerType(PointeeTy.withRestrict()));
+ }
+
+ return true;
+}
+
+/// AddMemberPointerWithMoreQualifiedTypeVariants - Add the member pointer
+/// type @p Ty to the set of member pointer types along with any
+/// more-qualified variants of that type. For example, if @p Ty is
+/// "int const X::*", this routine will add "int const X::*",
+/// "int const volatile X::*", "int const restrict X::*", and
+/// "int const volatile restrict X::*" to the set of member pointer
+/// types. Returns true if the add of @p Ty itself succeeded,
+/// false otherwise.
+bool
+BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
+ QualType Ty) {
+ // Insert this type.
+ if (!MemberPointerTypes.insert(Ty))
+ return false;
+
+ if (const MemberPointerType *PointerTy = Ty->getAsMemberPointerType()) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const Type *ClassTy = PointerTy->getClass();
+ // FIXME: Optimize this so that we don't keep trying to add the same types.
+
+ if (!PointeeTy.isConstQualified())
+ AddMemberPointerWithMoreQualifiedTypeVariants
+ (Context.getMemberPointerType(PointeeTy.withConst(), ClassTy));
+ if (!PointeeTy.isVolatileQualified())
+ AddMemberPointerWithMoreQualifiedTypeVariants
+ (Context.getMemberPointerType(PointeeTy.withVolatile(), ClassTy));
+ if (!PointeeTy.isRestrictQualified())
+ AddMemberPointerWithMoreQualifiedTypeVariants
+ (Context.getMemberPointerType(PointeeTy.withRestrict(), ClassTy));
+ }
+
+ return true;
+}
+
+/// AddTypesConvertedFrom - Add each of the types to which the type @p
+/// Ty can be implicitly converted to the given set of @p Types. We're
+/// primarily interested in pointer types and enumeration types. We also
+/// take member pointer types, for the conditional operator.
+/// AllowUserConversions is true if we should look at the conversion
+/// functions of a class type, and AllowExplicitConversions if we
+/// should also include the explicit conversion functions of a class
+/// type.
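+///
+/// An illustrative (hypothetical) example of what ends up in the set:
+/// @code
+/// struct B {};
+/// struct D : B {};
+/// // For an argument of type 'D*', this routine adds 'D*', 'void*', and
+/// // 'B*' (plus their more cv-qualified variants) to the pointer set.
+/// @endcode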
+void
+BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
+ bool AllowUserConversions,
+ bool AllowExplicitConversions) {
+ // Only deal with canonical types.
+ Ty = Context.getCanonicalType(Ty);
+
+ // Look through reference types; they aren't part of the type of an
+ // expression for the purposes of conversions.
+ if (const ReferenceType *RefTy = Ty->getAsReferenceType())
+ Ty = RefTy->getPointeeType();
+
+ // We don't care about qualifiers on the type.
+ Ty = Ty.getUnqualifiedType();
+
+ if (const PointerType *PointerTy = Ty->getAsPointerType()) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+
+ // Insert our type, and its more-qualified variants, into the set
+ // of types.
+ if (!AddPointerWithMoreQualifiedTypeVariants(Ty))
+ return;
+
+ // Add 'cv void*' to our set of types.
+ if (!Ty->isVoidType()) {
+ QualType QualVoid
+ = Context.VoidTy.getQualifiedType(PointeeTy.getCVRQualifiers());
+ AddPointerWithMoreQualifiedTypeVariants(Context.getPointerType(QualVoid));
+ }
+
+ // If this is a pointer to a class type, add pointers to its bases
+ // (with the same level of cv-qualification as the original
+ // derived class, of course).
+ if (const RecordType *PointeeRec = PointeeTy->getAsRecordType()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(PointeeRec->getDecl());
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+ QualType BaseTy = Context.getCanonicalType(Base->getType());
+ BaseTy = BaseTy.getQualifiedType(PointeeTy.getCVRQualifiers());
+
+ // Add the pointer type, recursively, so that we get all of
+ // the indirect base classes, too.
+ AddTypesConvertedFrom(Context.getPointerType(BaseTy), false, false);
+ }
+ }
+ } else if (Ty->isMemberPointerType()) {
+ // Member pointers are far easier, since the pointee can't be converted.
+ if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty))
+ return;
+ } else if (Ty->isEnumeralType()) {
+ EnumerationTypes.insert(Ty);
+ } else if (AllowUserConversions) {
+ if (const RecordType *TyRec = Ty->getAsRecordType()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ // FIXME: Visit conversion functions in the base classes, too.
+ OverloadedFunctionDecl *Conversions
+ = ClassDecl->getConversionFunctions();
+ for (OverloadedFunctionDecl::function_iterator Func
+ = Conversions->function_begin();
+ Func != Conversions->function_end(); ++Func) {
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(*Func);
+ if (AllowExplicitConversions || !Conv->isExplicit())
+ AddTypesConvertedFrom(Conv->getConversionType(), false, false);
+ }
+ }
+ }
+}
+
+/// AddBuiltinOperatorCandidates - Add the appropriate built-in
+/// operator overloads to the candidate set (C++ [over.built]), based
+/// on the operator @p Op and the arguments given. For example, if the
+/// operator is a binary '+', this routine might add "int
+/// operator+(int, int)" to cover integer addition.
+void
+Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet) {
+ // The set of "promoted arithmetic types", which are the arithmetic
+ // types that are preserved by promotion (C++ [over.built]p2). Note
+ // that the first few of these types are the promoted integral
+ // types; these types need to be first.
+ // FIXME: What about complex?
+ const unsigned FirstIntegralType = 0;
+ const unsigned LastIntegralType = 13;
+ const unsigned FirstPromotedIntegralType = 7,
+ LastPromotedIntegralType = 13;
+ const unsigned FirstPromotedArithmeticType = 7,
+ LastPromotedArithmeticType = 16;
+ const unsigned NumArithmeticTypes = 16;
+ QualType ArithmeticTypes[NumArithmeticTypes] = {
+ Context.BoolTy, Context.CharTy, Context.WCharTy,
+ Context.SignedCharTy, Context.ShortTy,
+ Context.UnsignedCharTy, Context.UnsignedShortTy,
+ Context.IntTy, Context.LongTy, Context.LongLongTy,
+ Context.UnsignedIntTy, Context.UnsignedLongTy, Context.UnsignedLongLongTy,
+ Context.FloatTy, Context.DoubleTy, Context.LongDoubleTy
+ };
+
+ // Find all of the types that the arguments can convert to, but only
+ // if the operator we're looking at has built-in operator candidates
+ // that make use of these types.
+ BuiltinCandidateTypeSet CandidateTypes(Context);
+ if (Op == OO_Less || Op == OO_Greater || Op == OO_LessEqual ||
+ Op == OO_GreaterEqual || Op == OO_EqualEqual || Op == OO_ExclaimEqual ||
+ Op == OO_Plus || (Op == OO_Minus && NumArgs == 2) || Op == OO_Equal ||
+ Op == OO_PlusEqual || Op == OO_MinusEqual || Op == OO_Subscript ||
+ Op == OO_ArrowStar || Op == OO_PlusPlus || Op == OO_MinusMinus ||
+ (Op == OO_Star && NumArgs == 1) || Op == OO_Conditional) {
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ CandidateTypes.AddTypesConvertedFrom(Args[ArgIdx]->getType(),
+ true,
+ (Op == OO_Exclaim ||
+ Op == OO_AmpAmp ||
+ Op == OO_PipePipe));
+ }
+
+ bool isComparison = false;
+ switch (Op) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ assert(false && "Expected an overloaded operator");
+ break;
+
+ case OO_Star: // '*' is either unary or binary
+ if (NumArgs == 1)
+ goto UnaryStar;
+ else
+ goto BinaryStar;
+ break;
+
+ case OO_Plus: // '+' is either unary or binary
+ if (NumArgs == 1)
+ goto UnaryPlus;
+ else
+ goto BinaryPlus;
+ break;
+
+ case OO_Minus: // '-' is either unary or binary
+ if (NumArgs == 1)
+ goto UnaryMinus;
+ else
+ goto BinaryMinus;
+ break;
+
+ case OO_Amp: // '&' is either unary or binary
+ if (NumArgs == 1)
+ goto UnaryAmp;
+ else
+ goto BinaryAmp;
+
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ // C++ [over.built]p3:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type, and VQ
+ // is either volatile or empty, there exist candidate operator
+ // functions of the form
+ //
+ // VQ T& operator++(VQ T&);
+ // T operator++(VQ T&, int);
+ //
+ // C++ [over.built]p4:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type other
+ // than bool, and VQ is either volatile or empty, there exist
+ // candidate operator functions of the form
+ //
+ // VQ T& operator--(VQ T&);
+ // T operator--(VQ T&, int);
+ for (unsigned Arith = (Op == OO_PlusPlus? 0 : 1);
+ Arith < NumArithmeticTypes; ++Arith) {
+ QualType ArithTy = ArithmeticTypes[Arith];
+ QualType ParamTypes[2]
+ = { Context.getLValueReferenceType(ArithTy), Context.IntTy };
+
+ // Non-volatile version.
+ if (NumArgs == 1)
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ AddBuiltinCandidate(ArithTy, ParamTypes, Args, 2, CandidateSet);
+
+ // Volatile version
+ ParamTypes[0] = Context.getLValueReferenceType(ArithTy.withVolatile());
+ if (NumArgs == 1)
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ AddBuiltinCandidate(ArithTy, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ // C++ [over.built]p5:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type, and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator++(T*VQ&);
+ // T*VQ& operator--(T*VQ&);
+ // T* operator++(T*VQ&, int);
+ // T* operator--(T*VQ&, int);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ // Skip pointer types that aren't pointers to object types.
+ if (!(*Ptr)->getAsPointerType()->getPointeeType()->isObjectType())
+ continue;
+
+ QualType ParamTypes[2] = {
+ Context.getLValueReferenceType(*Ptr), Context.IntTy
+ };
+
+ // Without volatile
+ if (NumArgs == 1)
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+
+ if (!Context.getCanonicalType(*Ptr).isVolatileQualified()) {
+ // With volatile
+ ParamTypes[0] = Context.getLValueReferenceType((*Ptr).withVolatile());
+ if (NumArgs == 1)
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+ break;
+
+ UnaryStar:
+ // C++ [over.built]p6:
+ // For every cv-qualified or cv-unqualified object type T, there
+ // exist candidate operator functions of the form
+ //
+ // T& operator*(T*);
+ //
+ // C++ [over.built]p7:
+ // For every function type T, there exist candidate operator
+ // functions of the form
+ // T& operator*(T*);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTy = *Ptr;
+ QualType PointeeTy = ParamTy->getAsPointerType()->getPointeeType();
+ AddBuiltinCandidate(Context.getLValueReferenceType(PointeeTy),
+ &ParamTy, Args, 1, CandidateSet);
+ }
+ break;
+
+ UnaryPlus:
+ // C++ [over.built]p8:
+ // For every type T, there exist candidate operator functions of
+ // the form
+ //
+ // T* operator+(T*);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTy = *Ptr;
+ AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet);
+ }
+
+ // Fall through
+
+ UnaryMinus:
+ // C++ [over.built]p9:
+ // For every promoted arithmetic type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator+(T);
+ // T operator-(T);
+ for (unsigned Arith = FirstPromotedArithmeticType;
+ Arith < LastPromotedArithmeticType; ++Arith) {
+ QualType ArithTy = ArithmeticTypes[Arith];
+ AddBuiltinCandidate(ArithTy, &ArithTy, Args, 1, CandidateSet);
+ }
+ break;
+
+ case OO_Tilde:
+ // C++ [over.built]p10:
+ // For every promoted integral type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator~(T);
+ for (unsigned Int = FirstPromotedIntegralType;
+ Int < LastPromotedIntegralType; ++Int) {
+ QualType IntTy = ArithmeticTypes[Int];
+ AddBuiltinCandidate(IntTy, &IntTy, Args, 1, CandidateSet);
+ }
+ break;
+
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Call:
+ assert(false && "Special operators don't use AddBuiltinOperatorCandidates");
+ break;
+
+ case OO_Comma:
+ UnaryAmp:
+ case OO_Arrow:
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ case OO_Less:
+ case OO_Greater:
+ case OO_LessEqual:
+ case OO_GreaterEqual:
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ // C++ [over.built]p15:
+ //
+ // For every pointer or enumeration type T, there exist
+ // candidate operator functions of the form
+ //
+ // bool operator<(T, T);
+ // bool operator>(T, T);
+ // bool operator<=(T, T);
+ // bool operator>=(T, T);
+ // bool operator==(T, T);
+ // bool operator!=(T, T);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ for (BuiltinCandidateTypeSet::iterator Enum
+ = CandidateTypes.enumeration_begin();
+ Enum != CandidateTypes.enumeration_end(); ++Enum) {
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ // Fall through.
+ isComparison = true;
+
+ BinaryPlus:
+ BinaryMinus:
+ if (!isComparison) {
+ // We didn't fall through, so we must have OO_Plus or OO_Minus.
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T
+ // there exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t);
+ // T& operator[](T*, ptrdiff_t); [BELOW]
+ // T* operator-(T*, ptrdiff_t);
+ // T* operator+(ptrdiff_t, T*);
+ // T& operator[](ptrdiff_t, T*); [BELOW]
+ //
+ // C++ [over.built]p14:
+ //
+ // For every T, where T is a pointer to object type, there
+ // exist candidate operator functions of the form
+ //
+ // ptrdiff_t operator-(T, T);
+ for (BuiltinCandidateTypeSet::iterator Ptr
+ = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, Context.getPointerDiffType() };
+
+ // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+
+ if (Op == OO_Plus) {
+ // T* operator+(ptrdiff_t, T*);
+ ParamTypes[0] = ParamTypes[1];
+ ParamTypes[1] = *Ptr;
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ } else {
+ // ptrdiff_t operator-(T, T);
+ ParamTypes[1] = *Ptr;
+ AddBuiltinCandidate(Context.getPointerDiffType(), ParamTypes,
+ Args, 2, CandidateSet);
+ }
+ }
+ }
+ // Fall through
+
+ case OO_Slash:
+ BinaryStar:
+ Conditional:
+ // C++ [over.built]p12:
+ //
+ // For every pair of promoted arithmetic types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator*(L, R);
+ // LR operator/(L, R);
+ // LR operator+(L, R);
+ // LR operator-(L, R);
+ // bool operator<(L, R);
+ // bool operator>(L, R);
+ // bool operator<=(L, R);
+ // bool operator>=(L, R);
+ // bool operator==(L, R);
+ // bool operator!=(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ //
+ // C++ [over.built]p24:
+ //
+ // For every pair of promoted arithmetic types L and R, there exist
+ // candidate operator functions of the form
+ //
+ // LR operator?(bool, L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ // Our candidates ignore the first parameter.
+ for (unsigned Left = FirstPromotedArithmeticType;
+ Left < LastPromotedArithmeticType; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType LandR[2] = { ArithmeticTypes[Left], ArithmeticTypes[Right] };
+ QualType Result
+ = isComparison? Context.BoolTy
+ : UsualArithmeticConversionsType(LandR[0], LandR[1]);
+ AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ }
+ }
+ break;
+
+ case OO_Percent:
+ BinaryAmp:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ // C++ [over.built]p17:
+ //
+ // For every pair of promoted integral types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator%(L, R);
+ // LR operator&(L, R);
+ // LR operator^(L, R);
+ // LR operator|(L, R);
+ // L operator<<(L, R);
+ // L operator>>(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ for (unsigned Left = FirstPromotedIntegralType;
+ Left < LastPromotedIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType LandR[2] = { ArithmeticTypes[Left], ArithmeticTypes[Right] };
+ QualType Result = (Op == OO_LessLess || Op == OO_GreaterGreater)
+ ? LandR[0]
+ : UsualArithmeticConversionsType(LandR[0], LandR[1]);
+ AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ }
+ }
+ break;
+
+ case OO_Equal:
+ // C++ [over.built]p20:
+ //
+ // For every pair (T, VQ), where T is an enumeration or
+ // (FIXME:) pointer to member type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // VQ T& operator=(VQ T&, T);
+ for (BuiltinCandidateTypeSet::iterator Enum
+ = CandidateTypes.enumeration_begin();
+ Enum != CandidateTypes.enumeration_end(); ++Enum) {
+ QualType ParamTypes[2];
+
+ // T& operator=(T&, T)
+ ParamTypes[0] = Context.getLValueReferenceType(*Enum);
+ ParamTypes[1] = *Enum;
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/false);
+
+ if (!Context.getCanonicalType(*Enum).isVolatileQualified()) {
+ // volatile T& operator=(volatile T&, T)
+ ParamTypes[0] = Context.getLValueReferenceType((*Enum).withVolatile());
+ ParamTypes[1] = *Enum;
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/false);
+ }
+ }
+ // Fall through.
+
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ // C++ [over.built]p19:
+ //
+ // For every pair (T, VQ), where T is any type and VQ is either
+ // volatile or empty, there exist candidate operator functions
+ // of the form
+ //
+ // T*VQ& operator=(T*VQ&, T*);
+ //
+ // C++ [over.built]p21:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator+=(T*VQ&, ptrdiff_t);
+ // T*VQ& operator-=(T*VQ&, ptrdiff_t);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = (Op == OO_Equal)? *Ptr : Context.getPointerDiffType();
+
+ // non-volatile version
+ ParamTypes[0] = Context.getLValueReferenceType(*Ptr);
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/Op == OO_Equal);
+
+ if (!Context.getCanonicalType(*Ptr).isVolatileQualified()) {
+ // volatile version
+ ParamTypes[0] = Context.getLValueReferenceType((*Ptr).withVolatile());
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/Op == OO_Equal);
+ }
+ }
+ // Fall through.
+
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ // C++ [over.built]p18:
+ //
+ // For every triple (L, VQ, R), where L is an arithmetic type,
+ // VQ is either volatile or empty, and R is a promoted
+ // arithmetic type, there exist candidate operator functions of
+ // the form
+ //
+ // VQ L& operator=(VQ L&, R);
+ // VQ L& operator*=(VQ L&, R);
+ // VQ L& operator/=(VQ L&, R);
+ // VQ L& operator+=(VQ L&, R);
+ // VQ L& operator-=(VQ L&, R);
+ for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = ArithmeticTypes[Right];
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] = Context.getLValueReferenceType(ArithmeticTypes[Left]);
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/Op == OO_Equal);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ ParamTypes[0] = ArithmeticTypes[Left].withVolatile();
+ ParamTypes[0] = Context.getLValueReferenceType(ParamTypes[0]);
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/Op == OO_Equal);
+ }
+ }
+ break;
+
+ case OO_PercentEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_AmpEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ // C++ [over.built]p22:
+ //
+ // For every triple (L, VQ, R), where L is an integral type, VQ
+ // is either volatile or empty, and R is a promoted integral
+ // type, there exist candidate operator functions of the form
+ //
+ // VQ L& operator%=(VQ L&, R);
+ // VQ L& operator<<=(VQ L&, R);
+ // VQ L& operator>>=(VQ L&, R);
+ // VQ L& operator&=(VQ L&, R);
+ // VQ L& operator^=(VQ L&, R);
+ // VQ L& operator|=(VQ L&, R);
+ for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = ArithmeticTypes[Right];
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] = Context.getLValueReferenceType(ArithmeticTypes[Left]);
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ ParamTypes[0] = ArithmeticTypes[Left];
+ ParamTypes[0].addVolatile();
+ ParamTypes[0] = Context.getLValueReferenceType(ParamTypes[0]);
+ AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+ break;
+
+ case OO_Exclaim: {
+ // C++ [over.operator]p23:
+ //
+ // There also exist candidate operator functions of the form
+ //
+ // bool operator!(bool);
+ // bool operator&&(bool, bool); [BELOW]
+ // bool operator||(bool, bool); [BELOW]
+ QualType ParamTy = Context.BoolTy;
+ AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/1);
+ break;
+ }
+
+ case OO_AmpAmp:
+ case OO_PipePipe: {
+ // C++ [over.operator]p23:
+ //
+ // There also exist candidate operator functions of the form
+ //
+ // bool operator!(bool); [ABOVE]
+ // bool operator&&(bool, bool);
+ // bool operator||(bool, bool);
+ QualType ParamTypes[2] = { Context.BoolTy, Context.BoolTy };
+ AddBuiltinCandidate(Context.BoolTy, ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/2);
+ break;
+ }
+
+ case OO_Subscript:
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T there
+ // exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t); [ABOVE]
+ // T& operator[](T*, ptrdiff_t);
+ // T* operator-(T*, ptrdiff_t); [ABOVE]
+ // T* operator+(ptrdiff_t, T*); [ABOVE]
+ // T& operator[](ptrdiff_t, T*);
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin();
+ Ptr != CandidateTypes.pointer_end(); ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, Context.getPointerDiffType() };
+ QualType PointeeType = (*Ptr)->getAsPointerType()->getPointeeType();
+ QualType ResultTy = Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](T*, ptrdiff_t)
+ AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+
+ // T& operator[](ptrdiff_t, T*);
+ ParamTypes[0] = ParamTypes[1];
+ ParamTypes[1] = *Ptr;
+ AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ break;
+
+ case OO_ArrowStar:
+ // FIXME: No support for pointer-to-members yet.
+ break;
+
+ case OO_Conditional:
+ // Note that we don't consider the first argument, since it has been
+ // contextually converted to bool long ago. The candidates below are
+ // therefore added as binary.
+ //
+ // C++ [over.built]p24:
+ // For every type T, where T is a pointer or pointer-to-member type,
+ // there exist candidate operator functions of the form
+ //
+ // T operator?(bool, T, T);
+ //
+ for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes.pointer_begin(),
+ E = CandidateTypes.pointer_end(); Ptr != E; ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ }
+ for (BuiltinCandidateTypeSet::iterator Ptr =
+ CandidateTypes.member_pointer_begin(),
+ E = CandidateTypes.member_pointer_end(); Ptr != E; ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ }
+ goto Conditional;
+ }
+}
+
+/// \brief Add function candidates found via argument-dependent lookup
+/// to the set of overloading candidates.
+///
+/// This routine performs argument-dependent name lookup based on the
+/// given function name (which may also be an operator name) and adds
+/// all of the overload candidates found by ADL to the overload
+/// candidate set (C++ [basic.lookup.argdep]).
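+///
+/// An illustrative (hypothetical) example:
+/// @code
+/// namespace N { struct X {}; void f(X); }
+/// N::X x;
+/// f(x); // N::f is found via argument-dependent lookup and added as a candidate
+/// @endcode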
+void
+Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet) {
+ FunctionSet Functions;
+
+ // Record all of the function candidates that we've already
+ // added to the overload set, so that we don't add those same
+ // candidates a second time.
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ CandEnd = CandidateSet.end();
+ Cand != CandEnd; ++Cand)
+ if (Cand->Function)
+ Functions.insert(Cand->Function);
+
+ ArgumentDependentLookup(Name, Args, NumArgs, Functions);
+
+ // Erase all of the candidates we already knew about.
+ // FIXME: This is suboptimal. Is there a better way?
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ CandEnd = CandidateSet.end();
+ Cand != CandEnd; ++Cand)
+ if (Cand->Function)
+ Functions.erase(Cand->Function);
+
+ // For each of the ADL candidates we found, add it to the overload
+ // set.
+ for (FunctionSet::iterator Func = Functions.begin(),
+ FuncEnd = Functions.end();
+ Func != FuncEnd; ++Func)
+ AddOverloadCandidate(*Func, Args, NumArgs, CandidateSet);
+}
+
+/// isBetterOverloadCandidate - Determines whether the first overload
+/// candidate is a better candidate than the second (C++ 13.3.3p1).
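+///
+/// For illustration (hypothetical declarations): in
+/// @code
+/// void h(int);
+/// void h(long);
+/// h(0); // h(int) wins: identity conversion beats the int -> long conversion
+/// @endcode
+/// the candidate taking 'int' has the better conversion sequence for the
+/// argument and is therefore the better candidate.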
+bool
+Sema::isBetterOverloadCandidate(const OverloadCandidate& Cand1,
+ const OverloadCandidate& Cand2)
+{
+ // Define viable functions to be better candidates than non-viable
+ // functions.
+ if (!Cand2.Viable)
+ return Cand1.Viable;
+ else if (!Cand1.Viable)
+ return false;
+
+ // C++ [over.match.best]p1:
+ //
+ // -- if F is a static member function, ICS1(F) is defined such
+ // that ICS1(F) is neither better nor worse than ICS1(G) for
+ // any function G, and, symmetrically, ICS1(G) is neither
+ // better nor worse than ICS1(F).
+ unsigned StartArg = 0;
+ if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
+ StartArg = 1;
+
+ // (C++ 13.3.3p1): a viable function F1 is defined to be a better
+ // function than another viable function F2 if for all arguments i,
+ // ICSi(F1) is not a worse conversion sequence than ICSi(F2), and
+ // then...
+ unsigned NumArgs = Cand1.Conversions.size();
+ assert(Cand2.Conversions.size() == NumArgs && "Overload candidate mismatch");
+ bool HasBetterConversion = false;
+ for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
+ switch (CompareImplicitConversionSequences(Cand1.Conversions[ArgIdx],
+ Cand2.Conversions[ArgIdx])) {
+ case ImplicitConversionSequence::Better:
+ // Cand1 has a better conversion sequence.
+ HasBetterConversion = true;
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ // Cand1 can't be better than Cand2.
+ return false;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ // Do nothing.
+ break;
+ }
+ }
+
+ if (HasBetterConversion)
+ return true;
+
+ // FIXME: Several other bullets in (C++ 13.3.3p1) need to be
+ // implemented, but they require template support.
+
+ // C++ [over.match.best]p1b4:
+ //
+ // -- the context is an initialization by user-defined conversion
+ // (see 8.5, 13.3.1.5) and the standard conversion sequence
+ // from the return type of F1 to the destination type (i.e.,
+ // the type of the entity being initialized) is a better
+ // conversion sequence than the standard conversion sequence
+ // from the return type of F2 to the destination type.
+ if (Cand1.Function && Cand2.Function &&
+ isa<CXXConversionDecl>(Cand1.Function) &&
+ isa<CXXConversionDecl>(Cand2.Function)) {
+ switch (CompareStandardConversionSequences(Cand1.FinalConversion,
+ Cand2.FinalConversion)) {
+ case ImplicitConversionSequence::Better:
+ // Cand1 has a better conversion sequence.
+ return true;
+
+ case ImplicitConversionSequence::Worse:
+ // Cand1 can't be better than Cand2.
+ return false;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ // Do nothing
+ break;
+ }
+ }
+
+ return false;
+}
+
+/// BestViableFunction - Computes the best viable function (C++ 13.3.3)
+/// within an overload candidate set. If overloading is successful,
+/// the result will be OR_Success and Best will be set to point to the
+/// best viable function within the candidate set. Otherwise, one of
+/// several kinds of errors will be returned; see
+/// Sema::OverloadingResult.
+Sema::OverloadingResult
+Sema::BestViableFunction(OverloadCandidateSet& CandidateSet,
+ OverloadCandidateSet::iterator& Best)
+{
+ // Find the best viable function.
+ Best = CandidateSet.end();
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
+ Cand != CandidateSet.end(); ++Cand) {
+ if (Cand->Viable) {
+ if (Best == CandidateSet.end() || isBetterOverloadCandidate(*Cand, *Best))
+ Best = Cand;
+ }
+ }
+
+ // If we didn't find any viable functions, abort.
+ if (Best == CandidateSet.end())
+ return OR_No_Viable_Function;
+
+ // Make sure that this function is better than every other viable
+ // function. If not, we have an ambiguity.
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
+ Cand != CandidateSet.end(); ++Cand) {
+ if (Cand->Viable &&
+ Cand != Best &&
+ !isBetterOverloadCandidate(*Best, *Cand)) {
+ Best = CandidateSet.end();
+ return OR_Ambiguous;
+ }
+ }
+
+ // Best is the best viable function. If Best refers to a function that is
+ // either deleted (C++0x) or unavailable (Clang extension), report an error.
+ if (Best->Function &&
+ (Best->Function->isDeleted() ||
+ Best->Function->getAttr<UnavailableAttr>()))
+ return OR_Deleted;
+
+ return OR_Success;
+}
+
+/// PrintOverloadCandidates - When overload resolution fails, prints
+/// diagnostic messages containing the candidates in the candidate
+/// set. If OnlyViable is true, only viable candidates will be printed.
+void
+Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet,
+ bool OnlyViable)
+{
+ OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ LastCand = CandidateSet.end();
+ for (; Cand != LastCand; ++Cand) {
+ if (Cand->Viable || !OnlyViable) {
+ if (Cand->Function) {
+ if (Cand->Function->isDeleted() ||
+ Cand->Function->getAttr<UnavailableAttr>()) {
+ // Deleted or "unavailable" function.
+ Diag(Cand->Function->getLocation(), diag::err_ovl_candidate_deleted)
+ << Cand->Function->isDeleted();
+ } else {
+ // Normal function
+ // FIXME: Give a better reason!
+ Diag(Cand->Function->getLocation(), diag::err_ovl_candidate);
+ }
+ } else if (Cand->IsSurrogate) {
+ // Desugar the type of the surrogate down to a function type,
+ // retaining as many typedefs as possible while still showing
+ // the function type (and, therefore, its parameter types).
+ QualType FnType = Cand->Surrogate->getConversionType();
+ bool isLValueReference = false;
+ bool isRValueReference = false;
+ bool isPointer = false;
+ if (const LValueReferenceType *FnTypeRef =
+ FnType->getAsLValueReferenceType()) {
+ FnType = FnTypeRef->getPointeeType();
+ isLValueReference = true;
+ } else if (const RValueReferenceType *FnTypeRef =
+ FnType->getAsRValueReferenceType()) {
+ FnType = FnTypeRef->getPointeeType();
+ isRValueReference = true;
+ }
+ if (const PointerType *FnTypePtr = FnType->getAsPointerType()) {
+ FnType = FnTypePtr->getPointeeType();
+ isPointer = true;
+ }
+ // Desugar down to a function type.
+ FnType = QualType(FnType->getAsFunctionType(), 0);
+ // Reconstruct the pointer/reference as appropriate.
+ if (isPointer) FnType = Context.getPointerType(FnType);
+ if (isRValueReference) FnType = Context.getRValueReferenceType(FnType);
+ if (isLValueReference) FnType = Context.getLValueReferenceType(FnType);
+
+ Diag(Cand->Surrogate->getLocation(), diag::err_ovl_surrogate_cand)
+ << FnType;
+ } else {
+ // FIXME: We need to get the identifier in here
+ // FIXME: Do we want the error message to point at the operator?
+ // (built-ins won't have a location)
+ QualType FnType
+ = Context.getFunctionType(Cand->BuiltinTypes.ResultTy,
+ Cand->BuiltinTypes.ParamTypes,
+ Cand->Conversions.size(),
+ false, 0);
+
+ Diag(SourceLocation(), diag::err_ovl_builtin_candidate) << FnType;
+ }
+ }
+ }
+}
+
+/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
+/// an overloaded function (C++ [over.over]), where @p From is an
+/// expression with overloaded function type and @p ToType is the type
+/// we're trying to resolve to. For example:
+///
+/// @code
+/// int f(double);
+/// int f(int);
+///
+/// int (*pfd)(double) = f; // selects f(double)
+/// @endcode
+///
+/// This routine returns the resulting FunctionDecl if it could be
+/// resolved, and NULL otherwise. When @p Complain is true, this
+/// routine will emit diagnostics if there is an error.
+FunctionDecl *
+Sema::ResolveAddressOfOverloadedFunction(Expr *From, QualType ToType,
+ bool Complain) {
+ QualType FunctionType = ToType;
+ bool IsMember = false;
+ if (const PointerType *ToTypePtr = ToType->getAsPointerType())
+ FunctionType = ToTypePtr->getPointeeType();
+ else if (const ReferenceType *ToTypeRef = ToType->getAsReferenceType())
+ FunctionType = ToTypeRef->getPointeeType();
+ else if (const MemberPointerType *MemTypePtr =
+ ToType->getAsMemberPointerType()) {
+ FunctionType = MemTypePtr->getPointeeType();
+ IsMember = true;
+ }
+
+ // We only look at pointers or references to functions.
+ if (!FunctionType->isFunctionType())
+ return 0;
+
+ // Find the actual overloaded function declaration.
+ OverloadedFunctionDecl *Ovl = 0;
+
+ // C++ [over.over]p1:
+ // [...] [Note: any redundant set of parentheses surrounding the
+ // overloaded function name is ignored (5.1). ]
+ Expr *OvlExpr = From->IgnoreParens();
+
+ // C++ [over.over]p1:
+ // [...] The overloaded function name can be preceded by the &
+ // operator.
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(OvlExpr)) {
+ if (UnOp->getOpcode() == UnaryOperator::AddrOf)
+ OvlExpr = UnOp->getSubExpr()->IgnoreParens();
+ }
+
+ // Try to dig out the overloaded function.
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(OvlExpr))
+ Ovl = dyn_cast<OverloadedFunctionDecl>(DR->getDecl());
+
+ // If there's no overloaded function declaration, we're done.
+ if (!Ovl)
+ return 0;
+
+ // Look through all of the overloaded functions, searching for one
+ // whose type matches exactly.
+ // FIXME: When templates or using declarations come along, we'll actually
+ // have to deal with duplicates, partial ordering, etc. For now, we
+ // can just do a simple search.
+ FunctionType = Context.getCanonicalType(FunctionType.getUnqualifiedType());
+ for (OverloadedFunctionDecl::function_iterator Fun = Ovl->function_begin();
+ Fun != Ovl->function_end(); ++Fun) {
+ // C++ [over.over]p3:
+ // Non-member functions and static member functions match
+ // targets of type "pointer-to-function" or "reference-to-function."
+ // Nonstatic member functions match targets of
+ // type "pointer-to-member-function."
+ // Note that according to DR 247, the containing class does not matter.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*Fun)) {
+ // Skip non-static functions when converting to pointer, and static
+ // when converting to member pointer.
+ if (Method->isStatic() == IsMember)
+ continue;
+ } else if (IsMember)
+ continue;
+
+ if (FunctionType == Context.getCanonicalType((*Fun)->getType()))
+ return *Fun;
+ }
+
+ return 0;
+}
+
+/// ResolveOverloadedCallFn - Given the call expression that calls Fn
+/// (which eventually refers to the declaration Func) and the call
+/// arguments Args/NumArgs, attempt to resolve the function call down
+/// to a specific function. If overload resolution succeeds, returns
+/// the function declaration produced by overload
+/// resolution. Otherwise, emits diagnostics, deletes all of the
+/// arguments and Fn, and returns NULL.
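+///
+/// For illustration only (the function g below is hypothetical), a
+/// call such as the one in
+///
+/// @code
+/// void g(int);
+/// void g(double);
+///
+/// void test() {
+///   g(3.14); // overload resolution selects g(double)
+/// }
+/// @endcode
+///
+/// is resolved by this routine to the declaration of g(double).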
+FunctionDecl *Sema::ResolveOverloadedCallFn(Expr *Fn, NamedDecl *Callee,
+ DeclarationName UnqualifiedName,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc,
+ bool &ArgumentDependentLookup) {
+ OverloadCandidateSet CandidateSet;
+
+ // Add the functions denoted by Callee to the set of candidate
+ // functions. While we're doing so, track whether argument-dependent
+ // lookup still applies, per:
+ //
+ // C++0x [basic.lookup.argdep]p3:
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains
+ //
+ // -- a declaration of a class member, or
+ //
+ // -- a block-scope function declaration that is not a
+ // using-declaration, or
+ //
+  //     -- a declaration that is neither a function nor a function
+ // template
+ //
+ // then Y is empty.
+ if (OverloadedFunctionDecl *Ovl
+ = dyn_cast_or_null<OverloadedFunctionDecl>(Callee)) {
+ for (OverloadedFunctionDecl::function_iterator Func = Ovl->function_begin(),
+ FuncEnd = Ovl->function_end();
+ Func != FuncEnd; ++Func) {
+ AddOverloadCandidate(*Func, Args, NumArgs, CandidateSet);
+
+ if ((*Func)->getDeclContext()->isRecord() ||
+ (*Func)->getDeclContext()->isFunctionOrMethod())
+ ArgumentDependentLookup = false;
+ }
+ } else if (FunctionDecl *Func = dyn_cast_or_null<FunctionDecl>(Callee)) {
+ AddOverloadCandidate(Func, Args, NumArgs, CandidateSet);
+
+ if (Func->getDeclContext()->isRecord() ||
+ Func->getDeclContext()->isFunctionOrMethod())
+ ArgumentDependentLookup = false;
+ }
+
+ if (Callee)
+ UnqualifiedName = Callee->getDeclName();
+
+ if (ArgumentDependentLookup)
+ AddArgumentDependentLookupCandidates(UnqualifiedName, Args, NumArgs,
+ CandidateSet);
+
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ return Best->Function;
+
+ case OR_No_Viable_Function:
+ Diag(Fn->getSourceRange().getBegin(),
+ diag::err_ovl_no_viable_function_in_call)
+ << UnqualifiedName << Fn->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ break;
+
+ case OR_Ambiguous:
+ Diag(Fn->getSourceRange().getBegin(), diag::err_ovl_ambiguous_call)
+ << UnqualifiedName << Fn->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ break;
+
+ case OR_Deleted:
+ Diag(Fn->getSourceRange().getBegin(), diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << UnqualifiedName
+ << Fn->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ break;
+ }
+
+ // Overload resolution failed. Destroy all of the subexpressions and
+ // return NULL.
+ Fn->Destroy(Context);
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg)
+ Args[Arg]->Destroy(Context);
+ return 0;
+}
+
+/// \brief Create a unary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '*').
+///
+/// \param OpcIn The UnaryOperator::Opcode that describes this
+/// operator.
+///
+/// \param Functions The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedUnaryOp().
+///
+/// \param input The input argument.
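+///
+/// For illustration only (the class X below is hypothetical), an
+/// expression such as the one in
+///
+/// \code
+/// struct X {
+///   X operator-() const; // member candidate
+/// };
+/// X negate(X x) { return -x; } // may resolve to X::operator-()
+/// \endcode
+///
+/// is built through this routine, which considers member operators,
+/// the non-member functions in \p Functions, and the built-in
+/// candidates.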
+Sema::OwningExprResult Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc,
+ unsigned OpcIn,
+ FunctionSet &Functions,
+ ExprArg input) {
+ UnaryOperator::Opcode Opc = static_cast<UnaryOperator::Opcode>(OpcIn);
+ Expr *Input = (Expr *)input.get();
+
+ OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc);
+ assert(Op != OO_None && "Invalid opcode for overloaded unary operator");
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ Expr *Args[2] = { Input, 0 };
+ unsigned NumArgs = 1;
+
+ // For post-increment and post-decrement, add the implicit '0' as
+ // the second argument, so that we know this is a post-increment or
+ // post-decrement.
+ if (Opc == UnaryOperator::PostInc || Opc == UnaryOperator::PostDec) {
+ llvm::APSInt Zero(Context.getTypeSize(Context.IntTy), false);
+ Args[1] = new (Context) IntegerLiteral(Zero, Context.IntTy,
+ SourceLocation());
+ NumArgs = 2;
+ }
+
+ if (Input->isTypeDependent()) {
+ OverloadedFunctionDecl *Overloads
+ = OverloadedFunctionDecl::Create(Context, CurContext, OpName);
+ for (FunctionSet::iterator Func = Functions.begin(),
+ FuncEnd = Functions.end();
+ Func != FuncEnd; ++Func)
+ Overloads->addOverload(*Func);
+
+ DeclRefExpr *Fn = new (Context) DeclRefExpr(Overloads, Context.OverloadTy,
+ OpLoc, false, false);
+
+ input.release();
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
+ &Args[0], NumArgs,
+ Context.DependentTy,
+ OpLoc));
+ }
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet;
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Functions, &Args[0], NumArgs, CandidateSet, false);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, &Args[0], NumArgs, CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, &Args[0], NumArgs, CandidateSet);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (PerformObjectArgumentInitialization(Input, Method))
+ return ExprError();
+ } else {
+ // Convert the arguments.
+ if (PerformCopyInitialization(Input,
+ FnDecl->getParamDecl(0)->getType(),
+ "passing"))
+ return ExprError();
+ }
+
+ // Determine the result type
+ QualType ResultTy
+ = FnDecl->getType()->getAsFunctionType()->getResultType();
+ ResultTy = ResultTy.getNonReferenceType();
+
+ // Build the actual expression node.
+ Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
+ SourceLocation());
+ UsualUnaryConversions(FnExpr);
+
+ input.release();
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, FnExpr,
+ &Input, 1, ResultTy,
+ OpLoc));
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ if (PerformImplicitConversion(Input, Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], "passing"))
+ return ExprError();
+
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function:
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, fall through to trying to
+ // build a built-in operation.
+ input.release();
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Owned(Input));
+}
+
+/// \brief Create a binary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '+').
+///
+/// \param OpcIn The BinaryOperator::Opcode that describes this
+/// operator.
+///
+/// \param Functions The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedBinOp().
+///
+/// \param LHS Left-hand argument.
+/// \param RHS Right-hand argument.
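+///
+/// For illustration only (the class Complex below is hypothetical),
+/// an expression such as the one in
+///
+/// \code
+/// struct Complex { double re, im; };
+/// Complex operator+(Complex a, Complex b); // non-member candidate
+///
+/// Complex add(Complex a, Complex b) { return a + b; }
+/// \endcode
+///
+/// is built through this routine, with the non-member operator+
+/// supplied via \p Functions alongside any member and built-in
+/// candidates.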
+Sema::OwningExprResult
+Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
+ unsigned OpcIn,
+ FunctionSet &Functions,
+ Expr *LHS, Expr *RHS) {
+ Expr *Args[2] = { LHS, RHS };
+
+ BinaryOperator::Opcode Opc = static_cast<BinaryOperator::Opcode>(OpcIn);
+ OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // If either side is type-dependent, create an appropriate dependent
+ // expression.
+ if (LHS->isTypeDependent() || RHS->isTypeDependent()) {
+ // .* cannot be overloaded.
+ if (Opc == BinaryOperator::PtrMemD)
+ return Owned(new (Context) BinaryOperator(LHS, RHS, Opc,
+ Context.DependentTy, OpLoc));
+
+ OverloadedFunctionDecl *Overloads
+ = OverloadedFunctionDecl::Create(Context, CurContext, OpName);
+ for (FunctionSet::iterator Func = Functions.begin(),
+ FuncEnd = Functions.end();
+ Func != FuncEnd; ++Func)
+ Overloads->addOverload(*Func);
+
+ DeclRefExpr *Fn = new (Context) DeclRefExpr(Overloads, Context.OverloadTy,
+ OpLoc, false, false);
+
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
+ Args, 2,
+ Context.DependentTy,
+ OpLoc));
+ }
+
+ // If this is the .* operator, which is not overloadable, just
+ // create a built-in binary operator.
+ if (Opc == BinaryOperator::PtrMemD)
+ return CreateBuiltinBinOp(OpLoc, Opc, LHS, RHS);
+
+ // If this is one of the assignment operators, we only perform
+ // overload resolution if the left-hand side is a class or
+ // enumeration type (C++ [expr.ass]p3).
+ if (Opc >= BinaryOperator::Assign && Opc <= BinaryOperator::OrAssign &&
+ !LHS->getType()->isOverloadableType())
+ return CreateBuiltinBinOp(OpLoc, Opc, LHS, RHS);
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet;
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Functions, Args, 2, CandidateSet, false);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, Args, 2, CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, Args, 2, CandidateSet);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (PerformObjectArgumentInitialization(LHS, Method) ||
+ PerformCopyInitialization(RHS, FnDecl->getParamDecl(0)->getType(),
+ "passing"))
+ return ExprError();
+ } else {
+ // Convert the arguments.
+ if (PerformCopyInitialization(LHS, FnDecl->getParamDecl(0)->getType(),
+ "passing") ||
+ PerformCopyInitialization(RHS, FnDecl->getParamDecl(1)->getType(),
+ "passing"))
+ return ExprError();
+ }
+
+ // Determine the result type
+ QualType ResultTy
+ = FnDecl->getType()->getAsFunctionType()->getResultType();
+ ResultTy = ResultTy.getNonReferenceType();
+
+ // Build the actual expression node.
+ Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(),
+ SourceLocation());
+ UsualUnaryConversions(FnExpr);
+
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, FnExpr,
+ Args, 2, ResultTy,
+ OpLoc));
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ if (PerformImplicitConversion(LHS, Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], "passing") ||
+ PerformImplicitConversion(RHS, Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], "passing"))
+ return ExprError();
+
+ break;
+ }
+ }
+
+  case OR_No_Viable_Function:
+    // When the left operand is of class type and the operator is an
+    // assignment or compound-assignment operator, do not fall through to
+    // the built-in handling; instead, report that no overloaded assignment
+    // operator was found.
+    if (LHS->getType()->isRecordType() &&
+        Opc >= BinaryOperator::Assign && Opc <= BinaryOperator::OrAssign) {
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << BinaryOperator::getOpcodeStr(Opc)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, try to build a built-in
+ // operation.
+ return CreateBuiltinBinOp(OpLoc, Opc, LHS, RHS);
+}
+
+/// BuildCallToMemberFunction - Build a call to a member
+/// function. MemExpr is the expression that refers to the member
+/// function (and includes the object parameter), Args/NumArgs are the
+/// arguments to the function call (not including the object
+/// parameter). The caller needs to validate that the member
+/// expression refers to a member function or an overloaded member
+/// function.
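+///
+/// For illustration only (the class S below is hypothetical), this
+/// routine builds the call in code such as
+///
+/// @code
+/// struct S {
+///   void f(int);
+///   void f(double);
+/// };
+///
+/// void test(S s) {
+///   s.f(42); // overload resolution selects S::f(int)
+/// }
+/// @endcode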
+Sema::ExprResult
+Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
+ SourceLocation LParenLoc, Expr **Args,
+ unsigned NumArgs, SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ // Dig out the member expression. This holds both the object
+ // argument and the member function we're referring to.
+ MemberExpr *MemExpr = 0;
+ if (ParenExpr *ParenE = dyn_cast<ParenExpr>(MemExprE))
+ MemExpr = dyn_cast<MemberExpr>(ParenE->getSubExpr());
+ else
+ MemExpr = dyn_cast<MemberExpr>(MemExprE);
+ assert(MemExpr && "Building member call without member expression");
+
+ // Extract the object argument.
+ Expr *ObjectArg = MemExpr->getBase();
+
+ CXXMethodDecl *Method = 0;
+ if (OverloadedFunctionDecl *Ovl
+ = dyn_cast<OverloadedFunctionDecl>(MemExpr->getMemberDecl())) {
+ // Add overload candidates
+ OverloadCandidateSet CandidateSet;
+ for (OverloadedFunctionDecl::function_iterator Func = Ovl->function_begin(),
+ FuncEnd = Ovl->function_end();
+ Func != FuncEnd; ++Func) {
+ assert(isa<CXXMethodDecl>(*Func) && "Function is not a method");
+ Method = cast<CXXMethodDecl>(*Func);
+ AddMethodCandidate(Method, ObjectArg, Args, NumArgs, CandidateSet,
+ /*SuppressUserConversions=*/false);
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ Method = cast<CXXMethodDecl>(Best->Function);
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(MemExpr->getSourceRange().getBegin(),
+ diag::err_ovl_no_viable_member_function_in_call)
+ << Ovl->getDeclName() << MemExprE->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ // FIXME: Leaking incoming expressions!
+ return true;
+
+ case OR_Ambiguous:
+ Diag(MemExpr->getSourceRange().getBegin(),
+ diag::err_ovl_ambiguous_member_call)
+ << Ovl->getDeclName() << MemExprE->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ // FIXME: Leaking incoming expressions!
+ return true;
+
+ case OR_Deleted:
+ Diag(MemExpr->getSourceRange().getBegin(),
+ diag::err_ovl_deleted_member_call)
+ << Best->Function->isDeleted()
+ << Ovl->getDeclName() << MemExprE->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ // FIXME: Leaking incoming expressions!
+ return true;
+ }
+
+ FixOverloadedFunctionReference(MemExpr, Method);
+ } else {
+ Method = dyn_cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+ }
+
+ assert(Method && "Member call to something that isn't a method?");
+ ExprOwningPtr<CXXMemberCallExpr>
+ TheCall(this, new (Context) CXXMemberCallExpr(Context, MemExpr, Args,
+ NumArgs,
+ Method->getResultType().getNonReferenceType(),
+ RParenLoc));
+
+ // Convert the object argument (for a non-static member function call).
+ if (!Method->isStatic() &&
+ PerformObjectArgumentInitialization(ObjectArg, Method))
+ return true;
+ MemExpr->setBase(ObjectArg);
+
+ // Convert the rest of the arguments
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(Method->getType());
+ if (ConvertArgumentsForCall(&*TheCall, MemExpr, Method, Proto, Args, NumArgs,
+ RParenLoc))
+ return true;
+
+ return CheckFunctionCall(Method, TheCall.take()).release();
+}
+
+/// BuildCallToObjectOfClassType - Build a call to an object of class
+/// type (C++ [over.call.object]), which can end up invoking an
+/// overloaded function call operator (@c operator()) or performing a
+/// user-defined conversion on the object argument.
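+///
+/// For illustration only (the class Adder below is hypothetical),
+/// this routine handles calls such as the one in
+///
+/// @code
+/// struct Adder {
+///   int operator()(int a, int b) const;
+/// };
+///
+/// int test(Adder add) {
+///   return add(1, 2); // calls Adder::operator()(int, int)
+/// }
+/// @endcode
+///
+/// A conversion function to a pointer-to-function type would instead
+/// be used through a surrogate call function.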
+Sema::ExprResult
+Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation *CommaLocs,
+ SourceLocation RParenLoc) {
+ assert(Object->getType()->isRecordType() && "Requires object type argument");
+ const RecordType *Record = Object->getType()->getAsRecordType();
+
+ // C++ [over.call.object]p1:
+ // If the primary-expression E in the function call syntax
+ // evaluates to a class object of type “cv T”, then the set of
+ // candidate functions includes at least the function call
+ // operators of T. The function call operators of T are obtained by
+ // ordinary lookup of the name operator() in the context of
+ // (E).operator().
+ OverloadCandidateSet CandidateSet;
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_iterator Oper, OperEnd;
+ for (llvm::tie(Oper, OperEnd) = Record->getDecl()->lookup(Context, OpName);
+ Oper != OperEnd; ++Oper)
+ AddMethodCandidate(cast<CXXMethodDecl>(*Oper), Object, Args, NumArgs,
+ CandidateSet, /*SuppressUserConversions=*/false);
+
+ // C++ [over.call.object]p2:
+ // In addition, for each conversion function declared in T of the
+ // form
+ //
+ // operator conversion-type-id () cv-qualifier;
+ //
+ // where cv-qualifier is the same cv-qualification as, or a
+ // greater cv-qualification than, cv, and where conversion-type-id
+ // denotes the type "pointer to function of (P1,...,Pn) returning
+ // R", or the type "reference to pointer to function of
+ // (P1,...,Pn) returning R", or the type "reference to function
+ // of (P1,...,Pn) returning R", a surrogate call function [...]
+ // is also considered as a candidate function. Similarly,
+ // surrogate call functions are added to the set of candidate
+ // functions for each conversion function declared in an
+ // accessible base class provided the function is not hidden
+ // within T by another intervening declaration.
+ //
+ // FIXME: Look in base classes for more conversion operators!
+ OverloadedFunctionDecl *Conversions
+ = cast<CXXRecordDecl>(Record->getDecl())->getConversionFunctions();
+ for (OverloadedFunctionDecl::function_iterator
+ Func = Conversions->function_begin(),
+ FuncEnd = Conversions->function_end();
+ Func != FuncEnd; ++Func) {
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(*Func);
+
+ // Strip the reference type (if any) and then the pointer type (if
+ // any) to get down to what might be a function type.
+ QualType ConvType = Conv->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAsPointerType())
+ ConvType = ConvPtrType->getPointeeType();
+
+ if (const FunctionProtoType *Proto = ConvType->getAsFunctionProtoType())
+ AddSurrogateCandidate(Conv, Proto, Object, Args, NumArgs, CandidateSet);
+ }
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the appropriate call
+ // below.
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(Object->getSourceRange().getBegin(),
+ diag::err_ovl_no_viable_object_call)
+ << Object->getType() << Object->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ break;
+
+ case OR_Ambiguous:
+ Diag(Object->getSourceRange().getBegin(),
+ diag::err_ovl_ambiguous_object_call)
+ << Object->getType() << Object->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ break;
+
+ case OR_Deleted:
+ Diag(Object->getSourceRange().getBegin(),
+ diag::err_ovl_deleted_object_call)
+ << Best->Function->isDeleted()
+ << Object->getType() << Object->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ break;
+ }
+
+ if (Best == CandidateSet.end()) {
+ // We had an error; delete all of the subexpressions and return
+ // the error.
+ Object->Destroy(Context);
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ Args[ArgIdx]->Destroy(Context);
+ return true;
+ }
+
+ if (Best->Function == 0) {
+ // Since there is no function declaration, this is one of the
+ // surrogate candidates. Dig out the conversion function.
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(
+ Best->Conversions[0].UserDefined.ConversionFunction);
+
+ // We selected one of the surrogate functions that converts the
+ // object parameter to a function pointer. Perform the conversion
+ // on the object argument, then let ActOnCallExpr finish the job.
+ // FIXME: Represent the user-defined conversion in the AST!
+ ImpCastExprToType(Object,
+ Conv->getConversionType().getNonReferenceType(),
+ Conv->getConversionType()->isLValueReferenceType());
+ return ActOnCallExpr(S, ExprArg(*this, Object), LParenLoc,
+ MultiExprArg(*this, (ExprTy**)Args, NumArgs),
+ CommaLocs, RParenLoc).release();
+ }
+
+ // We found an overloaded operator(). Build a CXXOperatorCallExpr
+ // that calls this method, using Object for the implicit object
+ // parameter and passing along the remaining arguments.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ const FunctionProtoType *Proto = Method->getType()->getAsFunctionProtoType();
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ unsigned NumArgsToCheck = NumArgs;
+
+ // Build the full argument list for the method call (the
+ // implicit object parameter is placed at the beginning of the
+ // list).
+ Expr **MethodArgs;
+ if (NumArgs < NumArgsInProto) {
+ NumArgsToCheck = NumArgsInProto;
+ MethodArgs = new Expr*[NumArgsInProto + 1];
+ } else {
+ MethodArgs = new Expr*[NumArgs + 1];
+ }
+ MethodArgs[0] = Object;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ MethodArgs[ArgIdx + 1] = Args[ArgIdx];
+
+ Expr *NewFn = new (Context) DeclRefExpr(Method, Method->getType(),
+ SourceLocation());
+ UsualUnaryConversions(NewFn);
+
+ // Once we've built TheCall, all of the expressions are properly
+ // owned.
+ QualType ResultTy = Method->getResultType().getNonReferenceType();
+ ExprOwningPtr<CXXOperatorCallExpr>
+ TheCall(this, new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn,
+ MethodArgs, NumArgs + 1,
+ ResultTy, RParenLoc));
+ delete [] MethodArgs;
+
+ // We may have default arguments. If so, we need to allocate more
+ // slots in the call for them.
+ if (NumArgs < NumArgsInProto)
+ TheCall->setNumArgs(Context, NumArgsInProto + 1);
+ else if (NumArgs > NumArgsInProto)
+ NumArgsToCheck = NumArgsInProto;
+
+ bool IsError = false;
+
+ // Initialize the implicit object parameter.
+ IsError |= PerformObjectArgumentInitialization(Object, Method);
+ TheCall->setArg(0, Object);
+
+ // Check the argument types.
+ for (unsigned i = 0; i != NumArgsToCheck; i++) {
+ Expr *Arg;
+ if (i < NumArgs) {
+ Arg = Args[i];
+
+ // Pass the argument.
+ QualType ProtoArgType = Proto->getArgType(i);
+ IsError |= PerformCopyInitialization(Arg, ProtoArgType, "passing");
+ } else {
+ Arg = new (Context) CXXDefaultArgExpr(Method->getParamDecl(i));
+ }
+
+ TheCall->setArg(i + 1, Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (Proto->isVariadic()) {
+ // Promote the arguments (C99 6.5.2.2p7).
+ for (unsigned i = NumArgsInProto; i != NumArgs; i++) {
+ Expr *Arg = Args[i];
+ IsError |= DefaultVariadicArgumentPromotion(Arg, VariadicMethod);
+ TheCall->setArg(i + 1, Arg);
+ }
+ }
+
+ if (IsError) return true;
+
+ return CheckFunctionCall(Method, TheCall.take()).release();
+}
+
+/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
+/// (if one exists), where @c Base is an expression of class type and
+/// @c Member is the name of the member we're trying to find.
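+///
+/// For illustration only (the classes below are hypothetical), this
+/// routine handles the '->' in code such as
+///
+/// @code
+/// struct Widget { int size; };
+/// struct WidgetPtr {
+///   Widget *operator->() const;
+/// };
+///
+/// int test(WidgetPtr p) {
+///   return p->size; // interpreted as (p.operator->())->size
+/// }
+/// @endcode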
+Action::ExprResult
+Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
+ SourceLocation MemberLoc,
+ IdentifierInfo &Member) {
+ assert(Base->getType()->isRecordType() && "left-hand side must have class type");
+
+ // C++ [over.ref]p1:
+ //
+ // [...] An expression x->m is interpreted as (x.operator->())->m
+ // for a class object x of type T if T::operator->() exists and if
+ // the operator is selected as the best match function by the
+ // overload resolution mechanism (13.3).
+ // FIXME: look in base classes.
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
+ OverloadCandidateSet CandidateSet;
+ const RecordType *BaseRecord = Base->getType()->getAsRecordType();
+
+ DeclContext::lookup_const_iterator Oper, OperEnd;
+ for (llvm::tie(Oper, OperEnd)
+ = BaseRecord->getDecl()->lookup(Context, OpName);
+ Oper != OperEnd; ++Oper)
+ AddMethodCandidate(cast<CXXMethodDecl>(*Oper), Base, 0, 0, CandidateSet,
+ /*SuppressUserConversions=*/false);
+
+ ExprOwningPtr<Expr> BasePtr(this, Base);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (BestViableFunction(CandidateSet, Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the call below.
+ break;
+
+ case OR_No_Viable_Function:
+ if (CandidateSet.empty())
+ Diag(OpLoc, diag::err_typecheck_member_reference_arrow)
+ << BasePtr->getType() << BasePtr->getSourceRange();
+ else
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << "operator->" << BasePtr->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/false);
+ return true;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper)
+ << "operator->" << BasePtr->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return true;
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << "operator->" << BasePtr->getSourceRange();
+ PrintOverloadCandidates(CandidateSet, /*OnlyViable=*/true);
+ return true;
+ }
+
+ // Convert the object parameter.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ if (PerformObjectArgumentInitialization(Base, Method))
+ return true;
+
+ // No concerns about early exits now.
+ BasePtr.take();
+
+ // Build the operator call.
+ Expr *FnExpr = new (Context) DeclRefExpr(Method, Method->getType(),
+ SourceLocation());
+ UsualUnaryConversions(FnExpr);
+ Base = new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr, &Base, 1,
+ Method->getResultType().getNonReferenceType(),
+ OpLoc);
+ return ActOnMemberReferenceExpr(S, ExprArg(*this, Base), OpLoc, tok::arrow,
+ MemberLoc, Member, DeclPtrTy()).release();
+}
+
+/// FixOverloadedFunctionReference - E is an expression that refers to
+/// a C++ overloaded function (possibly with some parentheses and
+/// perhaps a '&' around it). We have resolved the overloaded function
+/// to the function declaration Fn, so patch up the expression E to
+/// refer (possibly indirectly) to Fn.
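+///
+/// For illustration only (the function f below is hypothetical), in
+/// code such as
+///
+/// @code
+/// int f(int);
+/// int f(double);
+///
+/// int (*fp)(int) = &f; // '&f' is resolved to f(int)
+/// @endcode
+///
+/// this routine patches the '&f' subexpression so that it refers to
+/// the selected f(int) and has the corresponding pointer type.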
+void Sema::FixOverloadedFunctionReference(Expr *E, FunctionDecl *Fn) {
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ FixOverloadedFunctionReference(PE->getSubExpr(), Fn);
+ E->setType(PE->getSubExpr()->getType());
+ } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
+ assert(UnOp->getOpcode() == UnaryOperator::AddrOf &&
+ "Can only take the address of an overloaded function");
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (Method->isStatic()) {
+ // Do nothing: static member functions aren't any different
+ // from non-member functions.
+ }
+ else if (QualifiedDeclRefExpr *DRE
+ = dyn_cast<QualifiedDeclRefExpr>(UnOp->getSubExpr())) {
+        // We are taking the address of a member function, which yields a
+        // pointer to member function. Perform the computation here so that
+        // we get the appropriate pointer-to-member type.
+ DRE->setDecl(Fn);
+ DRE->setType(Fn->getType());
+ QualType ClassType
+ = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
+ E->setType(Context.getMemberPointerType(Fn->getType(),
+ ClassType.getTypePtr()));
+ return;
+ }
+ }
+ FixOverloadedFunctionReference(UnOp->getSubExpr(), Fn);
+ E->setType(Context.getPointerType(UnOp->getSubExpr()->getType()));
+ } else if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+ assert(isa<OverloadedFunctionDecl>(DR->getDecl()) &&
+ "Expected overloaded function");
+ DR->setDecl(Fn);
+ E->setType(Fn->getType());
+ } else if (MemberExpr *MemExpr = dyn_cast<MemberExpr>(E)) {
+ MemExpr->setMemberDecl(Fn);
+ E->setType(Fn->getType());
+ } else {
+ assert(false && "Invalid reference to overloaded function");
+ }
+}
+
+} // end namespace clang
diff --git a/lib/Sema/SemaOverload.h b/lib/Sema/SemaOverload.h
new file mode 100644
index 0000000..9de3806
--- /dev/null
+++ b/lib/Sema/SemaOverload.h
@@ -0,0 +1,263 @@
+//===--- Overload.h - C++ Overloading ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the data structures and types used in C++
+// overload resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_OVERLOAD_H
+#define LLVM_CLANG_SEMA_OVERLOAD_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class CXXConstructorDecl;
+ class FunctionDecl;
+
+ /// ImplicitConversionKind - The kind of implicit conversion used to
+ /// convert an argument to a parameter's type. The enumerator values
+  /// match Table 9 of C++ 13.3.3.1.1 and are listed such that
+ /// better conversion kinds have smaller values.
+ enum ImplicitConversionKind {
+ ICK_Identity = 0, ///< Identity conversion (no conversion)
+ ICK_Lvalue_To_Rvalue, ///< Lvalue-to-rvalue conversion (C++ 4.1)
+ ICK_Array_To_Pointer, ///< Array-to-pointer conversion (C++ 4.2)
+ ICK_Function_To_Pointer, ///< Function-to-pointer (C++ 4.3)
+ ICK_Qualification, ///< Qualification conversions (C++ 4.4)
+ ICK_Integral_Promotion, ///< Integral promotions (C++ 4.5)
+ ICK_Floating_Promotion, ///< Floating point promotions (C++ 4.6)
+ ICK_Complex_Promotion, ///< Complex promotions (Clang extension)
+ ICK_Integral_Conversion, ///< Integral conversions (C++ 4.7)
+ ICK_Floating_Conversion, ///< Floating point conversions (C++ 4.8)
+ ICK_Complex_Conversion, ///< Complex conversions (C99 6.3.1.6)
+ ICK_Floating_Integral, ///< Floating-integral conversions (C++ 4.9)
+ ICK_Complex_Real, ///< Complex-real conversions (C99 6.3.1.7)
+ ICK_Pointer_Conversion, ///< Pointer conversions (C++ 4.10)
+ ICK_Pointer_Member, ///< Pointer-to-member conversions (C++ 4.11)
+ ICK_Boolean_Conversion, ///< Boolean conversions (C++ 4.12)
+ ICK_Compatible_Conversion, ///< Conversions between compatible types in C99
+ ICK_Derived_To_Base, ///< Derived-to-base (C++ [over.best.ics])
+ ICK_Num_Conversion_Kinds ///< The number of conversion kinds
+ };
+
+ /// ImplicitConversionCategory - The category of an implicit
+  /// conversion kind. The enumerator values match Table 9 of
+  /// C++ 13.3.3.1.1 and are listed such that better conversion
+ /// categories have smaller values.
+ enum ImplicitConversionCategory {
+ ICC_Identity = 0, ///< Identity
+ ICC_Lvalue_Transformation, ///< Lvalue transformation
+ ICC_Qualification_Adjustment, ///< Qualification adjustment
+ ICC_Promotion, ///< Promotion
+ ICC_Conversion ///< Conversion
+ };
+
+ ImplicitConversionCategory
+ GetConversionCategory(ImplicitConversionKind Kind);
+
+ /// ImplicitConversionRank - The rank of an implicit conversion
+  /// kind. The enumerator values match Table 9 of C++ 13.3.3.1.1
+  /// and are listed such that better conversion ranks
+ /// have smaller values.
+ enum ImplicitConversionRank {
+ ICR_Exact_Match = 0, ///< Exact Match
+ ICR_Promotion, ///< Promotion
+ ICR_Conversion ///< Conversion
+ };
+
+ ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind);
+
+ /// StandardConversionSequence - represents a standard conversion
+ /// sequence (C++ 13.3.3.1.1). A standard conversion sequence
+ /// contains between zero and three conversions. If a particular
+ /// conversion is not needed, it will be set to the identity conversion
+ /// (ICK_Identity). Note that the three conversions are
+ /// specified as separate members (rather than in an array) so that
+ /// we can keep the size of a standard conversion sequence to a
+ /// single word.
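+  ///
+  /// For illustration only (the function f below is hypothetical),
+  /// the call in
+  ///
+  /// @code
+  /// void f(const int *p);
+  /// int arr[10];
+  /// void test() { f(arr); }
+  /// @endcode
+  ///
+  /// uses a standard conversion sequence whose First conversion is
+  /// array-to-pointer and whose Third conversion is a qualification
+  /// conversion (int* to const int*), with Second left as the
+  /// identity conversion.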
+ struct StandardConversionSequence {
+ /// First -- The first conversion can be an lvalue-to-rvalue
+ /// conversion, array-to-pointer conversion, or
+ /// function-to-pointer conversion.
+ ImplicitConversionKind First : 8;
+
+ /// Second - The second conversion can be an integral promotion,
+ /// floating point promotion, integral conversion, floating point
+ /// conversion, floating-integral conversion, pointer conversion,
+ /// pointer-to-member conversion, or boolean conversion.
+ ImplicitConversionKind Second : 8;
+
+ /// Third - The third conversion can be a qualification conversion.
+ ImplicitConversionKind Third : 8;
+
+    /// Deprecated - Whether this is the deprecated conversion of a
+ /// string literal to a pointer to non-const character data
+ /// (C++ 4.2p2).
+ bool Deprecated : 1;
+
+ /// IncompatibleObjC - Whether this is an Objective-C conversion
+ /// that we should warn about (if we actually use it).
+ bool IncompatibleObjC : 1;
+
+ /// ReferenceBinding - True when this is a reference binding
+ /// (C++ [over.ics.ref]).
+ bool ReferenceBinding : 1;
+
+ /// DirectBinding - True when this is a reference binding that is a
+ /// direct binding (C++ [dcl.init.ref]).
+ bool DirectBinding : 1;
+
+ /// RRefBinding - True when this is a reference binding of an rvalue
+ /// reference to an rvalue (C++0x [over.ics.rank]p3b4).
+ bool RRefBinding : 1;
+
+ /// FromType - The type that this conversion is converting
+ /// from. This is an opaque pointer that can be translated into a
+ /// QualType.
+ void *FromTypePtr;
+
+ /// ToType - The type that this conversion is converting to. This
+ /// is an opaque pointer that can be translated into a QualType.
+ void *ToTypePtr;
+
+ /// CopyConstructor - The copy constructor that is used to perform
+ /// this conversion, when the conversion is actually just the
+ /// initialization of an object via copy constructor. Such
+ /// conversions are either identity conversions or derived-to-base
+ /// conversions.
+ CXXConstructorDecl *CopyConstructor;
+
+ void setAsIdentityConversion();
+ ImplicitConversionRank getRank() const;
+ bool isPointerConversionToBool() const;
+ bool isPointerConversionToVoidPointer(ASTContext& Context) const;
+ void DebugPrint() const;
+ };
+
+ /// UserDefinedConversionSequence - Represents a user-defined
+ /// conversion sequence (C++ 13.3.3.1.2).
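+  ///
+  /// For illustration only (the names below are hypothetical), the
+  /// call in
+  ///
+  /// @code
+  /// struct A { A(long); };
+  /// void f(A a);
+  /// void test(int n) { f(n); }
+  /// @endcode
+  ///
+  /// uses a user-defined conversion sequence: a standard conversion
+  /// from int to long (Before), the converting constructor A(long),
+  /// and an identity conversion of the resulting A (After).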
+ struct UserDefinedConversionSequence {
+ /// Before - Represents the standard conversion that occurs before
+ /// the actual user-defined conversion. (C++ 13.3.3.1.2p1):
+ ///
+ /// If the user-defined conversion is specified by a constructor
+ /// (12.3.1), the initial standard conversion sequence converts
+ /// the source type to the type required by the argument of the
+ /// constructor. If the user-defined conversion is specified by
+ /// a conversion function (12.3.2), the initial standard
+ /// conversion sequence converts the source type to the implicit
+ /// object parameter of the conversion function.
+ StandardConversionSequence Before;
+
+ /// After - Represents the standard conversion that occurs after
+ /// the actual user-defined conversion.
+ StandardConversionSequence After;
+
+ /// ConversionFunction - The function that will perform the
+ /// user-defined conversion.
+ FunctionDecl* ConversionFunction;
+
+ void DebugPrint() const;
+ };
+
+ /// ImplicitConversionSequence - Represents an implicit conversion
+ /// sequence, which may be a standard conversion sequence
+ /// (C++ 13.3.3.1.1), user-defined conversion sequence (C++ 13.3.3.1.2),
+ /// or an ellipsis conversion sequence (C++ 13.3.3.1.3).
+ struct ImplicitConversionSequence {
+ /// Kind - The kind of implicit conversion sequence. BadConversion
+ /// specifies that there is no conversion from the source type to
+ /// the target type. The enumerator values are ordered such that
+ /// better implicit conversions have smaller values.
+ enum Kind {
+ StandardConversion = 0,
+ UserDefinedConversion,
+ EllipsisConversion,
+ BadConversion
+ };
+
+ /// ConversionKind - The kind of implicit conversion sequence.
+ Kind ConversionKind;
+
+ union {
+ /// When ConversionKind == StandardConversion, provides the
+ /// details of the standard conversion sequence.
+ StandardConversionSequence Standard;
+
+ /// When ConversionKind == UserDefinedConversion, provides the
+ /// details of the user-defined conversion sequence.
+ UserDefinedConversionSequence UserDefined;
+ };
+
+ // The result of a comparison between implicit conversion
+ // sequences. Use Sema::CompareImplicitConversionSequences to
+ // actually perform the comparison.
+ enum CompareKind {
+ Better = -1,
+ Indistinguishable = 0,
+ Worse = 1
+ };
+
+ void DebugPrint() const;
+ };
+
+ /// OverloadCandidate - A single candidate in an overload set (C++ 13.3).
+ struct OverloadCandidate {
+ /// Function - The actual function that this candidate
+ /// represents. When NULL, this is a built-in candidate
+ /// (C++ [over.oper]) or a surrogate for a conversion to a
+ /// function pointer or reference (C++ [over.call.object]).
+ FunctionDecl *Function;
+
+ // BuiltinTypes - Provides the return and parameter types of a
+ // built-in overload candidate. Only valid when Function is NULL.
+ struct {
+ QualType ResultTy;
+ QualType ParamTypes[3];
+ } BuiltinTypes;
+
+ /// Surrogate - The conversion function for which this candidate
+ /// is a surrogate, but only if IsSurrogate is true.
+ CXXConversionDecl *Surrogate;
+
+ /// Conversions - The conversion sequences used to convert the
+ /// function arguments to the function parameters.
+ llvm::SmallVector<ImplicitConversionSequence, 4> Conversions;
+
+ /// Viable - True to indicate that this overload candidate is viable.
+ bool Viable;
+
+ /// IsSurrogate - True to indicate that this candidate is a
+ /// surrogate for a conversion to a function pointer or reference
+ /// (C++ [over.call.object]).
+ bool IsSurrogate;
+
+ /// IgnoreObjectArgument - True to indicate that the first
+ /// argument's conversion, which for this function represents the
+ /// implicit object argument, should be ignored. This will be true
+ /// when the candidate is a static member function (where the
+ /// implicit object argument is just a placeholder) or a
+ /// non-static member function when the call doesn't have an
+ /// object argument.
+ bool IgnoreObjectArgument;
+
+ /// FinalConversion - For a conversion function (where Function is
+ /// a CXXConversionDecl), the standard conversion that occurs
+ /// after the call to the overload candidate to convert the result
+ /// of calling the conversion function to the required type.
+ StandardConversionSequence FinalConversion;
+ };
+
+ /// OverloadCandidateSet - A set of overload candidates, used in C++
+ /// overload resolution (C++ 13.3).
+ typedef llvm::SmallVector<OverloadCandidate, 16> OverloadCandidateSet;
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_OVERLOAD_H
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
new file mode 100644
index 0000000..15262e9
--- /dev/null
+++ b/lib/Sema/SemaStmt.cpp
@@ -0,0 +1,1266 @@
+//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for statements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/TargetInfo.h"
+using namespace clang;
+
+Sema::OwningStmtResult Sema::ActOnExprStmt(FullExprArg expr) {
+ Expr *E = expr->takeAs<Expr>();
+ assert(E && "ActOnExprStmt(): missing expression");
+
+ // C99 6.8.3p2: The expression in an expression statement is evaluated as a
+ // void expression for its side effects. Conversion to void allows any
+ // operand, even incomplete types.
+
+ // Same thing in for stmt first clause (when expr) and third clause.
+ return Owned(static_cast<Stmt*>(E));
+}
+
+
+Sema::OwningStmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc) {
+ return Owned(new (Context) NullStmt(SemiLoc));
+}
+
+Sema::OwningStmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ DeclGroupRef DG = dg.getAsVal<DeclGroupRef>();
+
+ // If we have an invalid decl, just return an error.
+ if (DG.isNull()) return StmtError();
+
+ return Owned(new (Context) DeclStmt(DG, StartLoc, EndLoc));
+}
+
+Action::OwningStmtResult
+Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
+ MultiStmtArg elts, bool isStmtExpr) {
+ unsigned NumElts = elts.size();
+ Stmt **Elts = reinterpret_cast<Stmt**>(elts.release());
+ // If we're in C89 mode, check that we don't have any decls after stmts. If
+ // so, emit an extension diagnostic.
+ if (!getLangOptions().C99 && !getLangOptions().CPlusPlus) {
+ // Note that __extension__ can be around a decl.
+ unsigned i = 0;
+ // Skip over all declarations.
+ for (; i != NumElts && isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ // We found the end of the list or a statement. Scan for another declstmt.
+ for (; i != NumElts && !isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ if (i != NumElts) {
+ Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
+ Diag(D->getLocation(), diag::ext_mixed_decls_code);
+ }
+ }
+ // Warn about unused expressions in statements.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ Expr *E = dyn_cast<Expr>(Elts[i]);
+ if (!E) continue;
+
+ // Warn about expressions with unused results if they are non-void and if
+    // this is not the last stmt in a stmt expr.
+ if (E->getType()->isVoidType() || (isStmtExpr && i == NumElts-1))
+ continue;
+
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ if (!E->isUnusedResultAWarning(Loc, R1, R2))
+ continue;
+
+ Diag(Loc, diag::warn_unused_expr) << R1 << R2;
+ }
+
+ return Owned(new (Context) CompoundStmt(Context, Elts, NumElts, L, R));
+}
+
+Action::OwningStmtResult
+Sema::ActOnCaseStmt(SourceLocation CaseLoc, ExprArg lhsval,
+ SourceLocation DotDotDotLoc, ExprArg rhsval,
+ SourceLocation ColonLoc) {
+ assert((lhsval.get() != 0) && "missing expression in case statement");
+
+ // C99 6.8.4.2p3: The expression shall be an integer constant.
+ // However, GCC allows any evaluatable integer expression.
+ Expr *LHSVal = static_cast<Expr*>(lhsval.get());
+ if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent() &&
+ VerifyIntegerConstantExpression(LHSVal))
+ return StmtError();
+
+  // GNU case-range extension: the upper bound shall also be an integer
+  // constant.
+
+ Expr *RHSVal = static_cast<Expr*>(rhsval.get());
+ if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent() &&
+ VerifyIntegerConstantExpression(RHSVal)) {
+ RHSVal = 0; // Recover by just forgetting about it.
+ rhsval = 0;
+ }
+
+ if (getSwitchStack().empty()) {
+ Diag(CaseLoc, diag::err_case_not_in_switch);
+ return StmtError();
+ }
+
+ // Only now release the smart pointers.
+ lhsval.release();
+ rhsval.release();
+ CaseStmt *CS = new (Context) CaseStmt(LHSVal, RHSVal, CaseLoc, DotDotDotLoc,
+ ColonLoc);
+ getSwitchStack().back()->addSwitchCase(CS);
+ return Owned(CS);
+}
+
+/// ActOnCaseStmtBody - This installs a statement as the body of a case.
+void Sema::ActOnCaseStmtBody(StmtTy *caseStmt, StmtArg subStmt) {
+ CaseStmt *CS = static_cast<CaseStmt*>(caseStmt);
+ Stmt *SubStmt = subStmt.takeAs<Stmt>();
+ CS->setSubStmt(SubStmt);
+}
+
+Action::OwningStmtResult
+Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
+ StmtArg subStmt, Scope *CurScope) {
+ Stmt *SubStmt = subStmt.takeAs<Stmt>();
+
+ if (getSwitchStack().empty()) {
+ Diag(DefaultLoc, diag::err_default_not_in_switch);
+ return Owned(SubStmt);
+ }
+
+ DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
+ getSwitchStack().back()->addSwitchCase(DS);
+ return Owned(DS);
+}
+
+Action::OwningStmtResult
+Sema::ActOnLabelStmt(SourceLocation IdentLoc, IdentifierInfo *II,
+ SourceLocation ColonLoc, StmtArg subStmt) {
+ Stmt *SubStmt = subStmt.takeAs<Stmt>();
+ // Look up the record for this label identifier.
+ LabelStmt *&LabelDecl = getLabelMap()[II];
+
+ // If not forward referenced or defined already, just create a new LabelStmt.
+ if (LabelDecl == 0)
+ return Owned(LabelDecl = new (Context) LabelStmt(IdentLoc, II, SubStmt));
+
+ assert(LabelDecl->getID() == II && "Label mismatch!");
+
+  // Otherwise, this label was either forward referenced or multiply defined.
+  // If multiply defined, reject it now.
+ if (LabelDecl->getSubStmt()) {
+ Diag(IdentLoc, diag::err_redefinition_of_label) << LabelDecl->getID();
+ Diag(LabelDecl->getIdentLoc(), diag::note_previous_definition);
+ return Owned(SubStmt);
+ }
+
+ // Otherwise, this label was forward declared, and we just found its real
+ // definition. Fill in the forward definition and return it.
+ LabelDecl->setIdentLoc(IdentLoc);
+ LabelDecl->setSubStmt(SubStmt);
+ return Owned(LabelDecl);
+}
+
+Action::OwningStmtResult
+Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal,
+ StmtArg ThenVal, SourceLocation ElseLoc,
+ StmtArg ElseVal) {
+ OwningExprResult CondResult(CondVal.release());
+
+ Expr *condExpr = CondResult.takeAs<Expr>();
+
+ assert(condExpr && "ActOnIfStmt(): missing expression");
+
+ if (!condExpr->isTypeDependent()) {
+ DefaultFunctionArrayConversion(condExpr);
+ // Take ownership again until we're past the error checking.
+ CondResult = condExpr;
+ QualType condType = condExpr->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ if (CheckCXXBooleanCondition(condExpr)) // C++ 6.4p4
+ return StmtError();
+ } else if (!condType->isScalarType()) // C99 6.8.4.1p1
+ return StmtError(Diag(IfLoc,
+ diag::err_typecheck_statement_requires_scalar)
+ << condType << condExpr->getSourceRange());
+ }
+
+ Stmt *thenStmt = ThenVal.takeAs<Stmt>();
+
+ // Warn if the if block has a null body without an else value.
+  // This helps prevent bugs due to typos, such as:
+ // if (condition);
+ // do_stuff();
+ if (!ElseVal.get()) {
+ if (NullStmt* stmt = dyn_cast<NullStmt>(thenStmt))
+ Diag(stmt->getSemiLoc(), diag::warn_empty_if_body);
+ }
+
+ CondResult.release();
+ return Owned(new (Context) IfStmt(IfLoc, condExpr, thenStmt,
+ ElseLoc, ElseVal.takeAs<Stmt>()));
+}
+
+Action::OwningStmtResult
+Sema::ActOnStartOfSwitchStmt(ExprArg cond) {
+ Expr *Cond = cond.takeAs<Expr>();
+
+ if (getLangOptions().CPlusPlus) {
+ // C++ 6.4.2.p2:
+ // The condition shall be of integral type, enumeration type, or of a class
+ // type for which a single conversion function to integral or enumeration
+ // type exists (12.3). If the condition is of class type, the condition is
+ // converted by calling that conversion function, and the result of the
+ // conversion is used in place of the original condition for the remainder
+ // of this section. Integral promotions are performed.
+ if (!Cond->isTypeDependent()) {
+ QualType Ty = Cond->getType();
+
+ // FIXME: Handle class types.
+
+ // If the type is wrong a diagnostic will be emitted later at
+ // ActOnFinishSwitchStmt.
+ if (Ty->isIntegralType() || Ty->isEnumeralType()) {
+ // Integral promotions are performed.
+ // FIXME: Integral promotions for C++ are not complete.
+ UsualUnaryConversions(Cond);
+ }
+ }
+ } else {
+ // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
+ UsualUnaryConversions(Cond);
+ }
+
+ SwitchStmt *SS = new (Context) SwitchStmt(Cond);
+ getSwitchStack().push_back(SS);
+ return Owned(SS);
+}
+
+/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
+/// the specified width and sign. If an overflow occurs, detect it and emit
+/// the specified diagnostic.
+void Sema::ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &Val,
+ unsigned NewWidth, bool NewSign,
+ SourceLocation Loc,
+ unsigned DiagID) {
+ // Perform a conversion to the promoted condition type if needed.
+ if (NewWidth > Val.getBitWidth()) {
+ // If this is an extension, just do it.
+ llvm::APSInt OldVal(Val);
+ Val.extend(NewWidth);
+
+ // If the input was signed and negative and the output is unsigned,
+ // warn.
+ if (!NewSign && OldVal.isSigned() && OldVal.isNegative())
+ Diag(Loc, DiagID) << OldVal.toString(10) << Val.toString(10);
+
+ Val.setIsSigned(NewSign);
+ } else if (NewWidth < Val.getBitWidth()) {
+ // If this is a truncation, check for overflow.
+ llvm::APSInt ConvVal(Val);
+ ConvVal.trunc(NewWidth);
+ ConvVal.setIsSigned(NewSign);
+ ConvVal.extend(Val.getBitWidth());
+ ConvVal.setIsSigned(Val.isSigned());
+ if (ConvVal != Val)
+ Diag(Loc, DiagID) << Val.toString(10) << ConvVal.toString(10);
+
+ // Regardless of whether a diagnostic was emitted, really do the
+ // truncation.
+ Val.trunc(NewWidth);
+ Val.setIsSigned(NewSign);
+ } else if (NewSign != Val.isSigned()) {
+ // Convert the sign to match the sign of the condition. This can cause
+ // overflow as well: unsigned(INTMIN)
+ llvm::APSInt OldVal(Val);
+ Val.setIsSigned(NewSign);
+
+ if (Val.isNegative()) // Sign bit changes meaning.
+ Diag(Loc, DiagID) << OldVal.toString(10) << Val.toString(10);
+ }
+}
+
+namespace {
+ struct CaseCompareFunctor {
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const llvm::APSInt &RHS) {
+ return LHS.first < RHS;
+ }
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS.first < RHS.first;
+ }
+ bool operator()(const llvm::APSInt &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS < RHS.first;
+ }
+ };
+}
+
+/// CmpCaseVals - Comparison predicate for sorting case values.
+///
+static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
+ const std::pair<llvm::APSInt, CaseStmt*>& rhs) {
+ if (lhs.first < rhs.first)
+ return true;
+
+ if (lhs.first == rhs.first &&
+ lhs.second->getCaseLoc().getRawEncoding()
+ < rhs.second->getCaseLoc().getRawEncoding())
+ return true;
+ return false;
+}
+
+Action::OwningStmtResult
+Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, StmtArg Switch,
+ StmtArg Body) {
+ Stmt *BodyStmt = Body.takeAs<Stmt>();
+
+ SwitchStmt *SS = getSwitchStack().back();
+ assert(SS == (SwitchStmt*)Switch.get() && "switch stack missing push/pop!");
+
+ SS->setBody(BodyStmt, SwitchLoc);
+ getSwitchStack().pop_back();
+
+ Expr *CondExpr = SS->getCond();
+ QualType CondType = CondExpr->getType();
+
+ if (!CondExpr->isTypeDependent() &&
+ !CondType->isIntegerType()) { // C99 6.8.4.2p1
+ Diag(SwitchLoc, diag::err_typecheck_statement_requires_integer)
+ << CondType << CondExpr->getSourceRange();
+ return StmtError();
+ }
+
+ // Get the bitwidth of the switched-on value before promotions. We must
+ // convert the integer case values to this width before comparison.
+ bool HasDependentValue
+ = CondExpr->isTypeDependent() || CondExpr->isValueDependent();
+ unsigned CondWidth
+ = HasDependentValue? 0
+ : static_cast<unsigned>(Context.getTypeSize(CondType));
+ bool CondIsSigned = CondType->isSignedIntegerType();
+
+ // Accumulate all of the case values in a vector so that we can sort them
+ // and detect duplicates. This vector contains the APInt for the case after
+ // it has been converted to the condition type.
+ typedef llvm::SmallVector<std::pair<llvm::APSInt, CaseStmt*>, 64> CaseValsTy;
+ CaseValsTy CaseVals;
+
+ // Keep track of any GNU case ranges we see. The APSInt is the low value.
+ std::vector<std::pair<llvm::APSInt, CaseStmt*> > CaseRanges;
+
+ DefaultStmt *TheDefaultStmt = 0;
+
+ bool CaseListIsErroneous = false;
+
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue;
+ SC = SC->getNextSwitchCase()) {
+
+ if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC)) {
+ if (TheDefaultStmt) {
+ Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined);
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev);
+
+ // FIXME: Remove the default statement from the switch block so that
+ // we'll return a valid AST. This requires recursing down the AST and
+ // finding it, not something we are set up to do right now. For now,
+ // just lop the entire switch stmt out of the AST.
+ CaseListIsErroneous = true;
+ }
+ TheDefaultStmt = DS;
+
+ } else {
+ CaseStmt *CS = cast<CaseStmt>(SC);
+
+ // We already verified that the expression has an i-c-e value (C99
+ // 6.8.4.2p3) - get that value now.
+ Expr *Lo = CS->getLHS();
+
+ if (Lo->isTypeDependent() || Lo->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+
+ llvm::APSInt LoVal = Lo->EvaluateAsInt(Context);
+
+ // Convert the value to the same width/sign as the condition.
+ ConvertIntegerToTypeWarnOnOverflow(LoVal, CondWidth, CondIsSigned,
+ CS->getLHS()->getLocStart(),
+ diag::warn_case_value_overflow);
+
+ // If the LHS is not the same type as the condition, insert an implicit
+ // cast.
+ ImpCastExprToType(Lo, CondType);
+ CS->setLHS(Lo);
+
+ // If this is a case range, remember it in CaseRanges, otherwise CaseVals.
+ if (CS->getRHS()) {
+ if (CS->getRHS()->isTypeDependent() ||
+ CS->getRHS()->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+ CaseRanges.push_back(std::make_pair(LoVal, CS));
+ } else
+ CaseVals.push_back(std::make_pair(LoVal, CS));
+ }
+ }
+
+ if (!HasDependentValue) {
+ // Sort all the scalar case values so we can easily detect duplicates.
+ std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);
+
+ if (!CaseVals.empty()) {
+ for (unsigned i = 0, e = CaseVals.size()-1; i != e; ++i) {
+ if (CaseVals[i].first == CaseVals[i+1].first) {
+ // If we have a duplicate, report it.
+ Diag(CaseVals[i+1].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case) << CaseVals[i].first.toString(10);
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
+
+ // Detect duplicate case ranges, which usually don't exist at all in
+ // the first place.
+ if (!CaseRanges.empty()) {
+ // Sort all the case ranges by their low value so we can easily detect
+ // overlaps between ranges.
+ std::stable_sort(CaseRanges.begin(), CaseRanges.end());
+
+ // Scan the ranges, computing the high values and removing empty ranges.
+ std::vector<llvm::APSInt> HiVals;
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ CaseStmt *CR = CaseRanges[i].second;
+ Expr *Hi = CR->getRHS();
+ llvm::APSInt HiVal = Hi->EvaluateAsInt(Context);
+
+ // Convert the value to the same width/sign as the condition.
+ ConvertIntegerToTypeWarnOnOverflow(HiVal, CondWidth, CondIsSigned,
+ CR->getRHS()->getLocStart(),
+ diag::warn_case_value_overflow);
+
+ // If the RHS is not the same type as the condition, insert an implicit
+ // cast.
+ ImpCastExprToType(Hi, CondType);
+ CR->setRHS(Hi);
+
+ // If the low value is bigger than the high value, the case is empty.
+ if (CaseRanges[i].first > HiVal) {
+ Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
+ << SourceRange(CR->getLHS()->getLocStart(),
+ CR->getRHS()->getLocEnd());
+ CaseRanges.erase(CaseRanges.begin()+i);
+ --i, --e;
+ continue;
+ }
+ HiVals.push_back(HiVal);
+ }
+
+ // Rescan the ranges, looking for overlap with singleton values and other
+ // ranges. Since the range list is sorted, we only need to compare case
+ // ranges with their neighbors.
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ llvm::APSInt &CRLo = CaseRanges[i].first;
+ llvm::APSInt &CRHi = HiVals[i];
+ CaseStmt *CR = CaseRanges[i].second;
+
+ // Check to see whether the case range overlaps with any
+ // singleton cases.
+ CaseStmt *OverlapStmt = 0;
+ llvm::APSInt OverlapVal(32);
+
+ // Find the smallest value >= the lower bound. If I is in the
+ // case range, then we have overlap.
+ CaseValsTy::iterator I = std::lower_bound(CaseVals.begin(),
+ CaseVals.end(), CRLo,
+ CaseCompareFunctor());
+ if (I != CaseVals.end() && I->first < CRHi) {
+ OverlapVal = I->first; // Found overlap with scalar.
+ OverlapStmt = I->second;
+ }
+
+ // Find the smallest value bigger than the upper bound.
+ I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor());
+ if (I != CaseVals.begin() && (I-1)->first >= CRLo) {
+ OverlapVal = (I-1)->first; // Found overlap with scalar.
+ OverlapStmt = (I-1)->second;
+ }
+
+ // Check to see if this case range overlaps with the previous
+ // case range.
+ if (i && CRLo <= HiVals[i-1]) {
+ OverlapVal = HiVals[i-1]; // Found overlap with range.
+ OverlapStmt = CaseRanges[i-1].second;
+ }
+
+ if (OverlapStmt) {
+ // If we have a duplicate, report it.
+ Diag(CR->getLHS()->getLocStart(), diag::err_duplicate_case)
+ << OverlapVal.toString(10);
+ Diag(OverlapStmt->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
+ }
+
+ // FIXME: If the case list was broken in some way, we don't have a good system
+ // to patch it up. Instead, just return the whole substmt as broken.
+ if (CaseListIsErroneous)
+ return StmtError();
+
+ Switch.release();
+ return Owned(SS);
+}
+
+Action::OwningStmtResult
+Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, StmtArg Body) {
+ ExprArg CondArg(Cond.release());
+ Expr *condExpr = CondArg.takeAs<Expr>();
+ assert(condExpr && "ActOnWhileStmt(): missing expression");
+
+ if (!condExpr->isTypeDependent()) {
+ DefaultFunctionArrayConversion(condExpr);
+ CondArg = condExpr;
+ QualType condType = condExpr->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ if (CheckCXXBooleanCondition(condExpr)) // C++ 6.4p4
+ return StmtError();
+ } else if (!condType->isScalarType()) // C99 6.8.5p2
+ return StmtError(Diag(WhileLoc,
+ diag::err_typecheck_statement_requires_scalar)
+ << condType << condExpr->getSourceRange());
+ }
+
+ CondArg.release();
+ return Owned(new (Context) WhileStmt(condExpr, Body.takeAs<Stmt>(),
+ WhileLoc));
+}
+
+Action::OwningStmtResult
+Sema::ActOnDoStmt(SourceLocation DoLoc, StmtArg Body,
+ SourceLocation WhileLoc, ExprArg Cond) {
+ Expr *condExpr = Cond.takeAs<Expr>();
+ assert(condExpr && "ActOnDoStmt(): missing expression");
+
+ if (!condExpr->isTypeDependent()) {
+ DefaultFunctionArrayConversion(condExpr);
+ Cond = condExpr;
+ QualType condType = condExpr->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ if (CheckCXXBooleanCondition(condExpr)) // C++ 6.4p4
+ return StmtError();
+ } else if (!condType->isScalarType()) // C99 6.8.5p2
+ return StmtError(Diag(DoLoc,
+ diag::err_typecheck_statement_requires_scalar)
+ << condType << condExpr->getSourceRange());
+ }
+
+ Cond.release();
+ return Owned(new (Context) DoStmt(Body.takeAs<Stmt>(), condExpr, DoLoc,
+ WhileLoc));
+}
+
+Action::OwningStmtResult
+Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ StmtArg first, ExprArg second, ExprArg third,
+ SourceLocation RParenLoc, StmtArg body) {
+ Stmt *First = static_cast<Stmt*>(first.get());
+ Expr *Second = static_cast<Expr*>(second.get());
+ Expr *Third = static_cast<Expr*>(third.get());
+ Stmt *Body = static_cast<Stmt*>(body.get());
+
+ if (!getLangOptions().CPlusPlus) {
+ if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+ DI!=DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && VD->isBlockVarDecl() && !VD->hasLocalStorage())
+ VD = 0;
+ if (VD == 0)
+ Diag((*DI)->getLocation(), diag::err_non_variable_decl_in_for);
+ // FIXME: mark decl erroneous!
+ }
+ }
+ }
+ if (Second && !Second->isTypeDependent()) {
+ DefaultFunctionArrayConversion(Second);
+ QualType SecondType = Second->getType();
+
+ if (getLangOptions().CPlusPlus) {
+ if (CheckCXXBooleanCondition(Second)) // C++ 6.4p4
+ return StmtError();
+ } else if (!SecondType->isScalarType()) // C99 6.8.5p2
+ return StmtError(Diag(ForLoc,
+ diag::err_typecheck_statement_requires_scalar)
+ << SecondType << Second->getSourceRange());
+ }
+ first.release();
+ second.release();
+ third.release();
+ body.release();
+ return Owned(new (Context) ForStmt(First, Second, Third, Body, ForLoc,
+ LParenLoc, RParenLoc));
+}
+
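+/// ActOnObjCForCollectionStmt - Builds an Objective-C fast enumeration
+/// (for-in) statement. A hypothetical illustration (not part of this patch):
+/// \code
+///   for (id element in collection) { /* ... */ }
+/// \endcode
+/// The element must be a single declaration or a valid lvalue, and both the
+/// element and the collection expression must have Objective-C object
+/// pointer type, as checked below.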
+Action::OwningStmtResult
+Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ StmtArg first, ExprArg second,
+ SourceLocation RParenLoc, StmtArg body) {
+ Stmt *First = static_cast<Stmt*>(first.get());
+ Expr *Second = static_cast<Expr*>(second.get());
+ Stmt *Body = static_cast<Stmt*>(body.get());
+ if (First) {
+ QualType FirstType;
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
+ if (!DS->isSingleDecl())
+ return StmtError(Diag((*DS->decl_begin())->getLocation(),
+ diag::err_toomany_element_decls));
+
+ Decl *D = DS->getSingleDecl();
+ FirstType = cast<ValueDecl>(D)->getType();
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ VarDecl *VD = cast<VarDecl>(D);
+ if (VD->isBlockVarDecl() && !VD->hasLocalStorage())
+ return StmtError(Diag(VD->getLocation(),
+ diag::err_non_variable_decl_in_for));
+ } else {
+ if (cast<Expr>(First)->isLvalue(Context) != Expr::LV_Valid)
+ return StmtError(Diag(First->getLocStart(),
+ diag::err_selector_element_not_lvalue)
+ << First->getSourceRange());
+
+ FirstType = static_cast<Expr*>(First)->getType();
+ }
+ if (!Context.isObjCObjectPointerType(FirstType))
+ Diag(ForLoc, diag::err_selector_element_type)
+ << FirstType << First->getSourceRange();
+ }
+ if (Second) {
+ DefaultFunctionArrayConversion(Second);
+ QualType SecondType = Second->getType();
+ if (!Context.isObjCObjectPointerType(SecondType))
+ Diag(ForLoc, diag::err_collection_expr_type)
+ << SecondType << Second->getSourceRange();
+ }
+ first.release();
+ second.release();
+ body.release();
+ return Owned(new (Context) ObjCForCollectionStmt(First, Second, Body,
+ ForLoc, RParenLoc));
+}
+
+Action::OwningStmtResult
+Sema::ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
+ IdentifierInfo *LabelII) {
+ // If we are in a block, reject all gotos for now.
+ if (CurBlock)
+ return StmtError(Diag(GotoLoc, diag::err_goto_in_block));
+
+ // Look up the record for this label identifier.
+ LabelStmt *&LabelDecl = getLabelMap()[LabelII];
+
+ // If we haven't seen this label yet, create a forward reference.
+ if (LabelDecl == 0)
+ LabelDecl = new (Context) LabelStmt(LabelLoc, LabelII, 0);
+
+ return Owned(new (Context) GotoStmt(LabelDecl, GotoLoc, LabelLoc));
+}
+
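+/// ActOnIndirectGotoStmt - Builds a GNU indirect ("computed") goto. A
+/// hypothetical illustration (not part of this patch):
+/// \code
+///   void *target = &&done;
+///   goto *target;
+/// done: ;
+/// \endcode
+/// The destination expression is converted to 'void *' below.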
+Action::OwningStmtResult
+Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
+ ExprArg DestExp) {
+ // Convert operand to void*
+ Expr* E = DestExp.takeAs<Expr>();
+ if (!E->isTypeDependent()) {
+ QualType ETy = E->getType();
+ AssignConvertType ConvTy =
+ CheckSingleAssignmentConstraints(Context.VoidPtrTy, E);
+ if (DiagnoseAssignmentResult(ConvTy, StarLoc, Context.VoidPtrTy, ETy,
+ E, "passing"))
+ return StmtError();
+ }
+ return Owned(new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E));
+}
+
+Action::OwningStmtResult
+Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
+ Scope *S = CurScope->getContinueParent();
+ if (!S) {
+ // C99 6.8.6.2p1: A continue statement shall appear only in or as a loop body.
+ return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
+ }
+
+ return Owned(new (Context) ContinueStmt(ContinueLoc));
+}
+
+Action::OwningStmtResult
+Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
+ Scope *S = CurScope->getBreakParent();
+ if (!S) {
+ // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
+ return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
+ }
+
+ return Owned(new (Context) BreakStmt(BreakLoc));
+}
+
+/// ActOnBlockReturnStmt - Utility routine to figure out a block's return type.
+///
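+/// A hypothetical illustration (not part of this patch): in
+/// \code
+///   int (^blk)(void) = ^{ return 42; };
+/// \endcode
+/// the first return statement seen in the block body determines the block's
+/// inferred return type ('int' here).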
+Action::OwningStmtResult
+Sema::ActOnBlockReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // If this is the first return we've seen in the block, infer the type of
+ // the block from it.
+ if (CurBlock->ReturnType == 0) {
+ if (RetValExp) {
+ // Don't call UsualUnaryConversions(), since we don't want to do
+ // integer promotions here.
+ DefaultFunctionArrayConversion(RetValExp);
+ CurBlock->ReturnType = RetValExp->getType().getTypePtr();
+ } else
+ CurBlock->ReturnType = Context.VoidTy.getTypePtr();
+ }
+ QualType FnRetType = QualType(CurBlock->ReturnType, 0);
+
+ if (CurBlock->TheDecl->hasAttr<NoReturnAttr>()) {
+ Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr)
+ << getCurFunctionOrMethodDecl()->getDeclName();
+ return StmtError();
+ }
+
+ // Otherwise, verify that this result type matches the previous one. We are
+ // pickier with blocks than for normal functions because we don't have GCC
+ // compatibility to worry about here.
+ if (CurBlock->ReturnType->isVoidType()) {
+ if (RetValExp) {
+ Diag(ReturnLoc, diag::err_return_block_has_expr);
+ RetValExp->Destroy(Context);
+ RetValExp = 0;
+ }
+ return Owned(new (Context) ReturnStmt(ReturnLoc, RetValExp));
+ }
+
+ if (!RetValExp)
+ return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
+
+ if (!FnRetType->isDependentType() && !RetValExp->isTypeDependent()) {
+ // We have a non-void block with an expression; continue checking it.
+ QualType RetValType = RetValExp->getType();
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+ // In C++ the return statement is handled via a copy initialization,
+ // the C version of which boils down to CheckSingleAssignmentConstraints.
+ // FIXME: Leaks RetValExp.
+ if (PerformCopyInitialization(RetValExp, FnRetType, "returning"))
+ return StmtError();
+
+ if (RetValExp) CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+ }
+
+ return Owned(new (Context) ReturnStmt(ReturnLoc, RetValExp));
+}
+
+/// IsReturnCopyElidable - Whether returning @p RetExpr from a function that
+/// returns a @p RetType fulfills the criteria for copy elision (C++0x 12.8p15).
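+///
+/// A hypothetical example (not from this patch) of a return that meets the
+/// criteria checked below:
+/// \code
+///   struct S { /* ... */ };
+///   S make() {
+///     S local;       // non-volatile automatic object
+///     return local;  // same cv-unqualified type as the return type
+///   }
+/// \endcode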
+static bool IsReturnCopyElidable(ASTContext &Ctx, QualType RetType,
+ Expr *RetExpr) {
+ QualType ExprType = RetExpr->getType();
+ // - in a return statement in a function with ...
+ // ... a class return type ...
+ if (!RetType->isRecordType())
+ return false;
+ // ... the same cv-unqualified type as the function return type ...
+ if (Ctx.getCanonicalType(RetType).getUnqualifiedType() !=
+ Ctx.getCanonicalType(ExprType).getUnqualifiedType())
+ return false;
+ // ... the expression is the name of a non-volatile automatic object ...
+ // We ignore parentheses here.
+ // FIXME: Is this compliant?
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetExpr->IgnoreParens());
+ if (!DR)
+ return false;
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return false;
+ return VD->hasLocalStorage() && !VD->getType()->isReferenceType()
+ && !VD->getType().isVolatileQualified();
+}
+
+Action::OwningStmtResult
+Sema::ActOnReturnStmt(SourceLocation ReturnLoc, FullExprArg rex) {
+ Expr *RetValExp = rex->takeAs<Expr>();
+ if (CurBlock)
+ return ActOnBlockReturnStmt(ReturnLoc, RetValExp);
+
+ QualType FnRetType;
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+ FnRetType = FD->getResultType();
+ if (FD->hasAttr<NoReturnAttr>())
+ Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
+ << getCurFunctionOrMethodDecl()->getDeclName();
+ } else if (ObjCMethodDecl *MD = getCurMethodDecl())
+ FnRetType = MD->getResultType();
+ else // If we don't have a function/method context, bail.
+ return StmtError();
+
+ if (FnRetType->isVoidType()) {
+ if (RetValExp) {// C99 6.8.6.4p1 (ext_ since GCC warns)
+ unsigned D = diag::ext_return_has_expr;
+ if (RetValExp->getType()->isVoidType())
+ D = diag::ext_return_has_void_expr;
+
+ // return (some void expression); is legal in C++.
+ if (D != diag::ext_return_has_void_expr ||
+ !getLangOptions().CPlusPlus) {
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ Diag(ReturnLoc, D)
+ << CurDecl->getDeclName() << isa<ObjCMethodDecl>(CurDecl)
+ << RetValExp->getSourceRange();
+ }
+ }
+ return Owned(new (Context) ReturnStmt(ReturnLoc, RetValExp));
+ }
+
+ if (!RetValExp && !FnRetType->isDependentType()) {
+ unsigned DiagID = diag::warn_return_missing_expr; // C90 6.6.6.4p4
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ if (getLangOptions().C99) DiagID = diag::ext_return_missing_expr;
+
+ if (FunctionDecl *FD = getCurFunctionDecl())
+ Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
+ else
+ Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
+ return Owned(new (Context) ReturnStmt(ReturnLoc, (Expr*)0));
+ }
+
+ if (!FnRetType->isDependentType() && !RetValExp->isTypeDependent()) {
+ // We have a non-void function with an expression; continue checking it.
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+ // C++0x 12.8p15: When certain criteria are met, an implementation is
+ // allowed to omit the copy construction of a class object, [...]
+ // - in a return statement in a function with a class return type, when
+ // the expression is the name of a non-volatile automatic object with
+ // the same cv-unqualified type as the function return type, the copy
+ // operation can be omitted [...]
+ // C++0x 12.8p16: When the criteria for elision of a copy operation are met
+ // and the object to be copied is designated by an lvalue, overload
+ // resolution to select the constructor for the copy is first performed
+ // as if the object were designated by an rvalue.
+ // Note that we only compute Elidable if we're in C++0x, since we don't
+ // care otherwise.
+ bool Elidable = getLangOptions().CPlusPlus0x ?
+ IsReturnCopyElidable(Context, FnRetType, RetValExp) :
+ false;
+
+ // In C++ the return statement is handled via a copy initialization,
+ // the C version of which boils down to CheckSingleAssignmentConstraints.
+ // FIXME: Leaks RetValExp on error.
+ if (PerformCopyInitialization(RetValExp, FnRetType, "returning", Elidable))
+ return StmtError();
+
+ if (RetValExp) CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+ }
+
+ return Owned(new (Context) ReturnStmt(ReturnLoc, RetValExp));
+}
+
+/// CheckAsmLValue - GNU C has an extremely ugly extension whereby it silently
+/// ignores "noop" casts in places where an lvalue is required by an inline asm.
+/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
+/// provide strong guidance not to use it.
+///
+/// This method checks to see if the argument is an acceptable l-value and
+/// returns false if it is a case we can handle.
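+///
+/// A hypothetical illustration (not part of this patch) of the extension:
+/// \code
+///   int x;
+///   asm ("" : "=r" ((int)x));  // no-op cast on an asm output operand
+/// \endcode
+/// Without -fheinous-gnu-extensions an error is emitted for this; with the
+/// flag only a warning is emitted.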
+static bool CheckAsmLValue(const Expr *E, Sema &S) {
+ if (E->isLvalue(S.Context) == Expr::LV_Valid)
+ return false; // Cool, this is an lvalue.
+
+ // Okay, this is not an lvalue, but perhaps it is the result of a cast that we
+ // are supposed to allow.
+ const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
+ if (E != E2 && E2->isLvalue(S.Context) == Expr::LV_Valid) {
+ if (!S.getLangOptions().HeinousExtensions)
+ S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ else
+ S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ // Accept, even if we emitted an error diagnostic.
+ return false;
+ }
+
+ // None of the above, just randomly invalid non-lvalue.
+ return true;
+}
+
+
+Sema::OwningStmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc,
+ bool IsSimple,
+ bool IsVolatile,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ std::string *Names,
+ MultiExprArg constraints,
+ MultiExprArg exprs,
+ ExprArg asmString,
+ MultiExprArg clobbers,
+ SourceLocation RParenLoc) {
+ unsigned NumClobbers = clobbers.size();
+ StringLiteral **Constraints =
+ reinterpret_cast<StringLiteral**>(constraints.get());
+ Expr **Exprs = reinterpret_cast<Expr **>(exprs.get());
+ StringLiteral *AsmString = cast<StringLiteral>((Expr *)asmString.get());
+ StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.get());
+
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+
+ // The parser verifies that there is a string literal here.
+ if (AsmString->isWide())
+ return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character)
+ << AsmString->getSourceRange());
+
+ for (unsigned i = 0; i != NumOutputs; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (Literal->isWide())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ TargetInfo::ConstraintInfo Info(Literal->getStrData(),
+ Literal->getByteLength(),
+ Names[i]);
+ if (!Context.Target.validateOutputConstraint(Info))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr());
+
+ // Check that the output exprs are valid lvalues.
+ Expr *OutputExpr = Exprs[i];
+ if (CheckAsmLValue(OutputExpr, *this)) {
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_output)
+ << OutputExpr->getSourceRange());
+ }
+
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (Literal->isWide())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ TargetInfo::ConstraintInfo Info(Literal->getStrData(),
+ Literal->getByteLength(),
+ Names[i]);
+ if (!Context.Target.validateInputConstraint(OutputConstraintInfos.data(),
+ NumOutputs, Info)) {
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr());
+ }
+
+ Expr *InputExpr = Exprs[i];
+
+ // Only allow void types for memory constraints.
+ if (Info.allowsMemory() && !Info.allowsRegister()) {
+ if (CheckAsmLValue(InputExpr, *this))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_input)
+ << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+
+ if (Info.allowsRegister()) {
+ if (InputExpr->getType()->isVoidType()) {
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+ }
+
+ DefaultFunctionArrayConversion(Exprs[i]);
+
+ InputConstraintInfos.push_back(Info);
+ }
+
+ // Check that the clobbers are valid.
+ for (unsigned i = 0; i != NumClobbers; i++) {
+ StringLiteral *Literal = Clobbers[i];
+ if (Literal->isWide())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ llvm::SmallString<16> Clobber(Literal->getStrData(),
+ Literal->getStrData() +
+ Literal->getByteLength());
+
+ if (!Context.Target.isValidGCCRegisterName(Clobber.c_str()))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_unknown_register_name) << Clobber.c_str());
+ }
+
+ constraints.release();
+ exprs.release();
+ asmString.release();
+ clobbers.release();
+ AsmStmt *NS =
+ new (Context) AsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs,
+ Names, Constraints, Exprs, AsmString, NumClobbers,
+ Clobbers, RParenLoc);
+ // Validate the asm string, ensuring it makes sense given the operands we
+ // have.
+ llvm::SmallVector<AsmStmt::AsmStringPiece, 8> Pieces;
+ unsigned DiagOffs;
+ if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
+ Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
+ << AsmString->getSourceRange();
+ DeleteStmt(NS);
+ return StmtError();
+ }
+
+ // Validate tied input operands for type mismatches.
+ for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ // If this is a tied constraint, verify that the output and input have
+ // either exactly the same type, or that they are int/ptr operands with the
+ // same size (e.g., int/long or int*/long are OK).
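+ //
+ // A hypothetical illustration (not part of this patch):
+ //   asm ("" : "=r" (l) : "0" (i));  // 'long l' tied to 'int i'
+ // This is accepted below when both operands are int/pointer types of the
+ // same size, or when the smaller operand is never referenced in the asm
+ // string and the output is a register.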
+ if (!Info.hasTiedOperand()) continue;
+
+ unsigned TiedTo = Info.getTiedOperand();
+ Expr *OutputExpr = Exprs[TiedTo];
+ Expr *InputExpr = Exprs[i+NumOutputs];
+ QualType InTy = InputExpr->getType();
+ QualType OutTy = OutputExpr->getType();
+ if (Context.hasSameType(InTy, OutTy))
+ continue; // All types can be tied to themselves.
+
+ // Int/ptr operands have some special cases that we allow.
+ if ((OutTy->isIntegerType() || OutTy->isPointerType()) &&
+ (InTy->isIntegerType() || InTy->isPointerType())) {
+
+ // They are ok if they are the same size. Tying void* to int is ok if
+ // they are the same size, for example. This also allows tying void* to
+ // int*.
+ uint64_t OutSize = Context.getTypeSize(OutTy);
+ uint64_t InSize = Context.getTypeSize(InTy);
+ if (OutSize == InSize)
+ continue;
+
+ // If the smaller input/output operand is not mentioned in the asm string,
+ // then we can promote it and the asm string won't notice. Check this
+ // case now.
+ bool SmallerValueMentioned = false;
+ for (unsigned p = 0, e = Pieces.size(); p != e; ++p) {
+ AsmStmt::AsmStringPiece &Piece = Pieces[p];
+ if (!Piece.isOperand()) continue;
+
+ // If this is a reference to the input and if the input was the smaller
+ // one, then we have to reject this asm.
+ if (Piece.getOperandNo() == i+NumOutputs) {
+ if (InSize < OutSize) {
+ SmallerValueMentioned = true;
+ break;
+ }
+ }
+
+ // If this is a reference to the tied output and if the output was the
+ // smaller one, then we have to reject this asm.
+ if (Piece.getOperandNo() == TiedTo) {
+ if (InSize > OutSize) {
+ SmallerValueMentioned = true;
+ break;
+ }
+ }
+ }
+
+ // If the smaller value wasn't mentioned in the asm string, and if the
+ // output was a register, just extend the shorter one to the size of the
+ // larger one.
+ if (!SmallerValueMentioned &&
+ OutputConstraintInfos[TiedTo].allowsRegister())
+ continue;
+ }
+
+ Diag(InputExpr->getLocStart(),
+ diag::err_asm_tying_incompatible_types)
+ << InTy << OutTy << OutputExpr->getSourceRange()
+ << InputExpr->getSourceRange();
+ DeleteStmt(NS);
+ return StmtError();
+ }
+
+ return Owned(NS);
+}
+
+Action::OwningStmtResult
+Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen, DeclPtrTy Parm,
+ StmtArg Body, StmtArg catchList) {
+ Stmt *CatchList = catchList.takeAs<Stmt>();
+ ParmVarDecl *PVD = cast_or_null<ParmVarDecl>(Parm.getAs<Decl>());
+
+ // PVD == 0 implies @catch(...).
+ if (PVD) {
+ // If we already know the decl is invalid, reject it.
+ if (PVD->isInvalidDecl())
+ return StmtError();
+
+ if (!Context.isObjCObjectPointerType(PVD->getType()))
+ return StmtError(Diag(PVD->getLocation(),
+ diag::err_catch_param_not_objc_type));
+ if (PVD->getType()->isObjCQualifiedIdType())
+ return StmtError(Diag(PVD->getLocation(),
+ diag::err_illegal_qualifiers_on_catch_parm));
+ }
+
+ ObjCAtCatchStmt *CS = new (Context) ObjCAtCatchStmt(AtLoc, RParen,
+ PVD, Body.takeAs<Stmt>(), CatchList);
+ return Owned(CatchList ? CatchList : CS);
+}
+
+Action::OwningStmtResult
+Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, StmtArg Body) {
+ return Owned(new (Context) ObjCAtFinallyStmt(AtLoc,
+ static_cast<Stmt*>(Body.release())));
+}
+
+Action::OwningStmtResult
+Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc,
+ StmtArg Try, StmtArg Catch, StmtArg Finally) {
+ CurFunctionNeedsScopeChecking = true;
+ return Owned(new (Context) ObjCAtTryStmt(AtLoc, Try.takeAs<Stmt>(),
+ Catch.takeAs<Stmt>(),
+ Finally.takeAs<Stmt>()));
+}
+
+Action::OwningStmtResult
+Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, ExprArg expr,Scope *CurScope) {
+ Expr *ThrowExpr = expr.takeAs<Expr>();
+ if (!ThrowExpr) {
+ // @throw without an expression designates a rethrow (which must occur
+ // in the context of an @catch clause).
+ Scope *AtCatchParent = CurScope;
+ while (AtCatchParent && !AtCatchParent->isAtCatchScope())
+ AtCatchParent = AtCatchParent->getParent();
+ if (!AtCatchParent)
+ return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
+ } else {
+ QualType ThrowType = ThrowExpr->getType();
+ // Make sure the expression type is an ObjC pointer or "void *".
+ if (!Context.isObjCObjectPointerType(ThrowType)) {
+ const PointerType *PT = ThrowType->getAsPointerType();
+ if (!PT || !PT->getPointeeType()->isVoidType())
+ return StmtError(Diag(AtLoc, diag::error_objc_throw_expects_object)
+ << ThrowExpr->getType() << ThrowExpr->getSourceRange());
+ }
+ }
+ return Owned(new (Context) ObjCAtThrowStmt(AtLoc, ThrowExpr));
+}
+
+Action::OwningStmtResult
+Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, ExprArg SynchExpr,
+ StmtArg SynchBody) {
+ CurFunctionNeedsScopeChecking = true;
+
+ // Make sure the expression type is an ObjC pointer or "void *".
+ Expr *SyncExpr = static_cast<Expr*>(SynchExpr.get());
+ if (!Context.isObjCObjectPointerType(SyncExpr->getType())) {
+ const PointerType *PT = SyncExpr->getType()->getAsPointerType();
+ if (!PT || !PT->getPointeeType()->isVoidType())
+ return StmtError(Diag(AtLoc, diag::error_objc_synchronized_expects_object)
+ << SyncExpr->getType() << SyncExpr->getSourceRange());
+ }
+
+ return Owned(new (Context) ObjCAtSynchronizedStmt(AtLoc,
+ SynchExpr.takeAs<Stmt>(),
+ SynchBody.takeAs<Stmt>()));
+}
+
+/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
+/// and creates a proper catch handler from them.
+Action::OwningStmtResult
+Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, DeclPtrTy ExDecl,
+ StmtArg HandlerBlock) {
+ // There's nothing to test that ActOnExceptionDecl didn't already test.
+ return Owned(new (Context) CXXCatchStmt(CatchLoc,
+ cast_or_null<VarDecl>(ExDecl.getAs<Decl>()),
+ HandlerBlock.takeAs<Stmt>()));
+}
+
+/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
+/// handlers and creates a try statement from them.
+Action::OwningStmtResult
+Sema::ActOnCXXTryBlock(SourceLocation TryLoc, StmtArg TryBlock,
+ MultiStmtArg RawHandlers) {
+ unsigned NumHandlers = RawHandlers.size();
+ assert(NumHandlers > 0 &&
+ "The parser shouldn't call this if there are no handlers.");
+ Stmt **Handlers = reinterpret_cast<Stmt**>(RawHandlers.get());
+
+ for(unsigned i = 0; i < NumHandlers - 1; ++i) {
+ CXXCatchStmt *Handler = llvm::cast<CXXCatchStmt>(Handlers[i]);
+ if (!Handler->getExceptionDecl())
+ return StmtError(Diag(Handler->getLocStart(), diag::err_early_catch_all));
+ }
+ // FIXME: We should detect handlers for the same type as an earlier one.
+ // This one is rather easy.
+ // FIXME: We should detect handlers that cannot catch anything because an
+ // earlier handler catches a superclass. Need to find a method that is not
+ // quadratic for this.
+ // Neither of these is explicitly forbidden, but every compiler detects them
+ // and warns about them.
+
+ CurFunctionNeedsScopeChecking = true;
+ RawHandlers.release();
+ return Owned(new (Context) CXXTryStmt(TryLoc,
+ static_cast<Stmt*>(TryBlock.release()),
+ Handlers, NumHandlers));
+}
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
new file mode 100644
index 0000000..782a0d8
--- /dev/null
+++ b/lib/Sema/SemaTemplate.cpp
@@ -0,0 +1,2651 @@
+//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Basic/LangOptions.h"
+
+using namespace clang;
+
+/// isTemplateName - Determines whether the identifier II is a
+/// template name in the current scope, and returns the template
+/// declaration (via TemplateResult) if II names a template. An
+/// optional CXXScopeSpec can be passed to indicate the C++ scope in
+/// which the identifier will be found.
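+///
+/// A hypothetical illustration (not part of this patch): after
+/// \code
+///   template<typename T> class vector { };
+/// \endcode
+/// looking up 'vector' yields TNK_Type_template, whereas an identifier that
+/// does not name a template yields TNK_Non_template.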
+TemplateNameKind Sema::isTemplateName(const IdentifierInfo &II, Scope *S,
+ TemplateTy &TemplateResult,
+ const CXXScopeSpec *SS) {
+ NamedDecl *IIDecl = LookupParsedName(S, SS, &II, LookupOrdinaryName);
+
+ TemplateNameKind TNK = TNK_Non_template;
+ TemplateDecl *Template = 0;
+
+ if (IIDecl) {
+ if ((Template = dyn_cast<TemplateDecl>(IIDecl))) {
+ if (isa<FunctionTemplateDecl>(IIDecl))
+ TNK = TNK_Function_template;
+ else if (isa<ClassTemplateDecl>(IIDecl) ||
+ isa<TemplateTemplateParmDecl>(IIDecl))
+ TNK = TNK_Type_template;
+ else
+ assert(false && "Unknown template declaration kind");
+ } else if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(IIDecl)) {
+ // C++ [temp.local]p1:
+ // Like normal (non-template) classes, class templates have an
+ // injected-class-name (Clause 9). The injected-class-name
+ // can be used with or without a template-argument-list. When
+ // it is used without a template-argument-list, it is
+ // equivalent to the injected-class-name followed by the
+ // template-parameters of the class template enclosed in
+ // <>. When it is used with a template-argument-list, it
+ // refers to the specified class template specialization,
+ // which could be the current specialization or another
+ // specialization.
+ if (Record->isInjectedClassName()) {
+ Record = cast<CXXRecordDecl>(Context.getCanonicalDecl(Record));
+ if ((Template = Record->getDescribedClassTemplate()))
+ TNK = TNK_Type_template;
+ else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ Template = Spec->getSpecializedTemplate();
+ TNK = TNK_Type_template;
+ }
+ }
+ }
+
+ // FIXME: What follows is a gross hack.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(IIDecl)) {
+ if (FD->getType()->isDependentType()) {
+ TemplateResult = TemplateTy::make(FD);
+ return TNK_Function_template;
+ }
+ } else if (OverloadedFunctionDecl *Ovl
+ = dyn_cast<OverloadedFunctionDecl>(IIDecl)) {
+ for (OverloadedFunctionDecl::function_iterator F = Ovl->function_begin(),
+ FEnd = Ovl->function_end();
+ F != FEnd; ++F) {
+ if ((*F)->getType()->isDependentType()) {
+ TemplateResult = TemplateTy::make(Ovl);
+ return TNK_Function_template;
+ }
+ }
+ }
+
+ if (TNK != TNK_Non_template) {
+ if (SS && SS->isSet() && !SS->isInvalid()) {
+ NestedNameSpecifier *Qualifier
+ = static_cast<NestedNameSpecifier *>(SS->getScopeRep());
+ TemplateResult
+ = TemplateTy::make(Context.getQualifiedTemplateName(Qualifier,
+ false,
+ Template));
+ } else
+ TemplateResult = TemplateTy::make(TemplateName(Template));
+ }
+ }
+ return TNK;
+}
+
+/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
+/// that the template parameter 'PrevDecl' is being shadowed by a new
+/// declaration at location Loc. Returns true to indicate that this is
+/// an error, and false otherwise.
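+///
+/// A hypothetical example (not from this patch) of the shadowing diagnosed
+/// here (permitted only in Microsoft mode, per the check below):
+/// \code
+///   template<typename T> void f() {
+///     int T;  // shadows the template parameter T
+///   }
+/// \endcode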
+bool Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
+ assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
+
+ // Microsoft Visual C++ permits template parameters to be shadowed.
+ if (getLangOptions().Microsoft)
+ return false;
+
+ // C++ [temp.local]p4:
+ // A template-parameter shall not be redeclared within its
+ // scope (including nested scopes).
+ Diag(Loc, diag::err_template_param_shadow)
+ << cast<NamedDecl>(PrevDecl)->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_template_param_here);
+ return true;
+}
+
+/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
+/// the parameter D to reference the templated declaration and return a pointer
+/// to the template declaration. Otherwise, do nothing to D and return null.
+TemplateDecl *Sema::AdjustDeclIfTemplate(DeclPtrTy &D) {
+ if (TemplateDecl *Temp = dyn_cast<TemplateDecl>(D.getAs<Decl>())) {
+ D = DeclPtrTy::make(Temp->getTemplatedDecl());
+ return Temp;
+ }
+ return 0;
+}
+
+/// ActOnTypeParameter - Called when a C++ template type parameter
+/// (e.g., "typename T") has been parsed. Typename specifies whether
+/// the keyword "typename" was used to declare the type parameter
+/// (otherwise, "class" was used), and KeyLoc is the location of the
+/// "class" or "typename" keyword. ParamName is the name of the
+/// parameter (NULL indicates an unnamed template parameter) and
+/// ParamName is the location of the parameter name (if any).
+/// If the type parameter has a default argument, it will be added
+/// later via ActOnTypeParameterDefault.
+Sema::DeclPtrTy Sema::ActOnTypeParameter(Scope *S, bool Typename,
+ SourceLocation KeyLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth, unsigned Position) {
+ assert(S->isTemplateParamScope() &&
+ "Template type parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ if (ParamName) {
+ NamedDecl *PrevDecl = LookupName(S, ParamName, LookupTagName);
+ if (PrevDecl && PrevDecl->isTemplateParameter())
+ Invalid = Invalid || DiagnoseTemplateParameterShadow(ParamNameLoc,
+ PrevDecl);
+ }
+
+ SourceLocation Loc = ParamNameLoc;
+ if (!ParamName)
+ Loc = KeyLoc;
+
+ TemplateTypeParmDecl *Param
+ = TemplateTypeParmDecl::Create(Context, CurContext, Loc,
+ Depth, Position, ParamName, Typename);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (ParamName) {
+ // Add the template parameter into the current scope.
+ S->AddDecl(DeclPtrTy::make(Param));
+ IdResolver.AddDecl(Param);
+ }
+
+ return DeclPtrTy::make(Param);
+}
+
+/// ActOnTypeParameterDefault - Adds a default argument (the type
+/// Default) to the given template type parameter (TypeParam).
+void Sema::ActOnTypeParameterDefault(DeclPtrTy TypeParam,
+ SourceLocation EqualLoc,
+ SourceLocation DefaultLoc,
+ TypeTy *DefaultT) {
+ TemplateTypeParmDecl *Parm
+ = cast<TemplateTypeParmDecl>(TypeParam.getAs<Decl>());
+ QualType Default = QualType::getFromOpaquePtr(DefaultT);
+
+ // C++ [temp.param]p14:
+ // A template-parameter shall not be used in its own default argument.
+ // FIXME: Implement this check! Needs a recursive walk over the types.
+
+ // Check the template argument itself.
+ if (CheckTemplateArgument(Parm, Default, DefaultLoc)) {
+ Parm->setInvalidDecl();
+ return;
+ }
+
+ Parm->setDefaultArgument(Default, DefaultLoc, false);
+}
+
+/// \brief Check that the type of a non-type template parameter is
+/// well-formed.
+///
+/// \returns the (possibly-promoted) parameter type if valid;
+/// otherwise, produces a diagnostic and returns a NULL type.
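+///
+/// Hypothetical examples (not from this patch):
+/// \code
+///   template<int N> struct A { };      // OK: integral type
+///   template<int *P> struct B { };     // OK: pointer to object
+///   template<int Arr[5]> struct C { }; // adjusted to 'int *'
+///   template<float F> struct D { };    // diagnosed: invalid non-type parm
+/// \endcode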
+QualType
+Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
+ // C++ [temp.param]p4:
+ //
+ // A non-type template-parameter shall have one of the following
+ // (optionally cv-qualified) types:
+ //
+ // -- integral or enumeration type,
+ if (T->isIntegralType() || T->isEnumeralType() ||
+ // -- pointer to object or pointer to function,
+ (T->isPointerType() &&
+ (T->getAsPointerType()->getPointeeType()->isObjectType() ||
+ T->getAsPointerType()->getPointeeType()->isFunctionType())) ||
+ // -- reference to object or reference to function,
+ T->isReferenceType() ||
+ // -- pointer to member.
+ T->isMemberPointerType() ||
+ // If T is a dependent type, we can't do the check now, so we
+ // assume that it is well-formed.
+ T->isDependentType())
+ return T;
+ // C++ [temp.param]p8:
+ //
+ // A non-type template-parameter of type "array of T" or
+ // "function returning T" is adjusted to be of type "pointer to
+ // T" or "pointer to function returning T", respectively.
+ else if (T->isArrayType())
+ // FIXME: Keep the type prior to promotion?
+ return Context.getArrayDecayedType(T);
+ else if (T->isFunctionType())
+ // FIXME: Keep the type prior to promotion?
+ return Context.getPointerType(T);
+
+ Diag(Loc, diag::err_template_nontype_parm_bad_type)
+ << T;
+
+ return QualType();
+}
+
+/// ActOnNonTypeTemplateParameter - Called when a C++ non-type
+/// template parameter (e.g., "int Size" in "template<int Size>
+/// class Array") has been parsed. S is the current scope and D is
+/// the parsed declarator.
+Sema::DeclPtrTy Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+ unsigned Depth,
+ unsigned Position) {
+ QualType T = GetTypeForDeclarator(D, S);
+
+ assert(S->isTemplateParamScope() &&
+ "Non-type template parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ IdentifierInfo *ParamName = D.getIdentifier();
+ if (ParamName) {
+ NamedDecl *PrevDecl = LookupName(S, ParamName, LookupTagName);
+ if (PrevDecl && PrevDecl->isTemplateParameter())
+ Invalid = Invalid || DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
+ PrevDecl);
+ }
+
+ T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc());
+ if (T.isNull()) {
+ T = Context.IntTy; // Recover with an 'int' type.
+ Invalid = true;
+ }
+
+ NonTypeTemplateParmDecl *Param
+ = NonTypeTemplateParmDecl::Create(Context, CurContext, D.getIdentifierLoc(),
+ Depth, Position, ParamName, T);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (D.getIdentifier()) {
+ // Add the template parameter into the current scope.
+ S->AddDecl(DeclPtrTy::make(Param));
+ IdResolver.AddDecl(Param);
+ }
+ return DeclPtrTy::make(Param);
+}
+
+/// \brief Adds a default argument to the given non-type template
+/// parameter.
+void Sema::ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParamD,
+ SourceLocation EqualLoc,
+ ExprArg DefaultE) {
+ NonTypeTemplateParmDecl *TemplateParm
+ = cast<NonTypeTemplateParmDecl>(TemplateParamD.getAs<Decl>());
+ Expr *Default = static_cast<Expr *>(DefaultE.get());
+
+ // C++ [temp.param]p14:
+ // A template-parameter shall not be used in its own default argument.
+ // FIXME: Implement this check! Needs a recursive walk over the types.
+
+ // Check the well-formedness of the default template argument.
+ if (CheckTemplateArgument(TemplateParm, TemplateParm->getType(), Default)) {
+ TemplateParm->setInvalidDecl();
+ return;
+ }
+
+ TemplateParm->setDefaultArgument(DefaultE.takeAs<Expr>());
+}
+
+
+/// ActOnTemplateTemplateParameter - Called when a C++ template template
+/// parameter (e.g. T in template <template <typename> class T> class array)
+/// has been parsed. S is the current scope.
+Sema::DeclPtrTy Sema::ActOnTemplateTemplateParameter(Scope* S,
+ SourceLocation TmpLoc,
+ TemplateParamsTy *Params,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ unsigned Depth,
+ unsigned Position)
+{
+ assert(S->isTemplateParamScope() &&
+ "Template template parameter not in template parameter scope!");
+
+ // Construct the parameter object.
+ TemplateTemplateParmDecl *Param =
+ TemplateTemplateParmDecl::Create(Context, CurContext, TmpLoc, Depth,
+ Position, Name,
+ (TemplateParameterList*)Params);
+
+ // Make sure the parameter is valid.
+ // FIXME: Decl object is not currently invalidated anywhere so this doesn't
+ // do anything yet. However, if the template parameter list or (eventual)
+ // default value is ever invalidated, that will propagate here.
+ bool Invalid = false;
+ if (Invalid) {
+ Param->setInvalidDecl();
+ }
+
+ // If the tt-param has a name, then link the identifier into the scope
+ // and lookup mechanisms.
+ if (Name) {
+ S->AddDecl(DeclPtrTy::make(Param));
+ IdResolver.AddDecl(Param);
+ }
+
+ return DeclPtrTy::make(Param);
+}
+
+/// \brief Adds a default argument to the given template template
+/// parameter.
+void Sema::ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParamD,
+ SourceLocation EqualLoc,
+ ExprArg DefaultE) {
+ TemplateTemplateParmDecl *TemplateParm
+ = cast<TemplateTemplateParmDecl>(TemplateParamD.getAs<Decl>());
+
+ // Since a template-template parameter's default argument is an
+ // id-expression, it must be a DeclRefExpr.
+ DeclRefExpr *Default
+ = cast<DeclRefExpr>(static_cast<Expr *>(DefaultE.get()));
+
+ // C++ [temp.param]p14:
+ // A template-parameter shall not be used in its own default argument.
+ // FIXME: Implement this check! Needs a recursive walk over the types.
+
+ // Check the well-formedness of the template argument.
+ if (!isa<TemplateDecl>(Default->getDecl())) {
+ Diag(Default->getSourceRange().getBegin(),
+ diag::err_template_arg_must_be_template)
+ << Default->getSourceRange();
+ TemplateParm->setInvalidDecl();
+ return;
+ }
+ if (CheckTemplateArgument(TemplateParm, Default)) {
+ TemplateParm->setInvalidDecl();
+ return;
+ }
+
+ DefaultE.release();
+ TemplateParm->setDefaultArgument(Default);
+}
+
+/// ActOnTemplateParameterList - Builds a TemplateParameterList that
+/// contains the template parameters in Params/NumParams.
+Sema::TemplateParamsTy *
+Sema::ActOnTemplateParameterList(unsigned Depth,
+ SourceLocation ExportLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ DeclPtrTy *Params, unsigned NumParams,
+ SourceLocation RAngleLoc) {
+ if (ExportLoc.isValid())
+ Diag(ExportLoc, diag::note_template_export_unsupported);
+
+ return TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
+ (Decl**)Params, NumParams, RAngleLoc);
+}
+
+Sema::DeclResult
+Sema::ActOnClassTemplate(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc, const CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists,
+ AccessSpecifier AS) {
+ assert(TemplateParameterLists.size() > 0 && "No template parameter lists?");
+ assert(TK != TK_Reference && "Can only declare or define class templates");
+ bool Invalid = false;
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParameterLists))
+ return true;
+
+ TagDecl::TagKind Kind;
+ switch (TagSpec) {
+ default: assert(0 && "Unknown tag type!");
+ case DeclSpec::TST_struct: Kind = TagDecl::TK_struct; break;
+ case DeclSpec::TST_union: Kind = TagDecl::TK_union; break;
+ case DeclSpec::TST_class: Kind = TagDecl::TK_class; break;
+ }
+
+ // There is no such thing as an unnamed class template.
+ if (!Name) {
+ Diag(KWLoc, diag::err_template_unnamed_class);
+ return true;
+ }
+
+ // Find any previous declaration with this name.
+ LookupResult Previous = LookupParsedName(S, &SS, Name, LookupOrdinaryName,
+ true);
+ assert(!Previous.isAmbiguous() && "Ambiguity in class template redecl?");
+ NamedDecl *PrevDecl = 0;
+ if (Previous.begin() != Previous.end())
+ PrevDecl = *Previous.begin();
+
+ DeclContext *SemanticContext = CurContext;
+ if (SS.isNotEmpty() && !SS.isInvalid()) {
+ SemanticContext = computeDeclContext(SS);
+
+ // FIXME: need to match up several levels of template parameter lists here.
+ }
+
+ // FIXME: member templates!
+ TemplateParameterList *TemplateParams
+ = static_cast<TemplateParameterList *>(*TemplateParameterLists.release());
+
+ // If there is a previous declaration with the same name, check
+ // whether this is a valid redeclaration.
+ ClassTemplateDecl *PrevClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(PrevDecl);
+ if (PrevClassTemplate) {
+ // Ensure that the template parameter lists are compatible.
+ if (!TemplateParameterListsAreEqual(TemplateParams,
+ PrevClassTemplate->getTemplateParameters(),
+ /*Complain=*/true))
+ return true;
+
+ // C++ [temp.class]p4:
+ // In a redeclaration, partial specialization, explicit
+ // specialization or explicit instantiation of a class template,
+ // the class-key shall agree in kind with the original class
+ // template declaration (7.1.5.3).
+ RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
+ if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind, KWLoc, *Name)) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << CodeModificationHint::CreateReplacement(KWLoc,
+ PrevRecordDecl->getKindName());
+ Diag(PrevRecordDecl->getLocation(), diag::note_previous_use);
+ Kind = PrevRecordDecl->getTagKind();
+ }
+
+ // Check for redefinition of this class template.
+ if (TK == TK_Definition) {
+ if (TagDecl *Def = PrevRecordDecl->getDefinition(Context)) {
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // FIXME: Would it make sense to try to "forget" the previous
+ // definition, as part of error recovery?
+ return true;
+ }
+ }
+ } else if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ } else if (PrevDecl) {
+ // C++ [temp]p5:
+ // A class template shall not have the same name as any other
+ // template, class, function, object, enumeration, enumerator,
+ // namespace, or type in the same scope (3.3), except as specified
+ // in (14.5.4).
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+
+ // Check the template parameter list of this declaration, possibly
+ // merging in the template parameter list from the previous class
+ // template declaration.
+ if (CheckTemplateParameterList(TemplateParams,
+ PrevClassTemplate? PrevClassTemplate->getTemplateParameters() : 0))
+ Invalid = true;
+
+ // FIXME: If we had a scope specifier, we had better have a previous template
+ // declaration!
+
+ CXXRecordDecl *NewClass =
+ CXXRecordDecl::Create(Context, Kind, SemanticContext, NameLoc, Name,
+ PrevClassTemplate?
+ PrevClassTemplate->getTemplatedDecl() : 0,
+ /*DelayTypeCreation=*/true);
+
+ ClassTemplateDecl *NewTemplate
+ = ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
+ DeclarationName(Name), TemplateParams,
+ NewClass, PrevClassTemplate);
+ NewClass->setDescribedClassTemplate(NewTemplate);
+
+ // Build the type for the class template declaration now.
+ QualType T =
+ Context.getTypeDeclType(NewClass,
+ PrevClassTemplate?
+ PrevClassTemplate->getTemplatedDecl() : 0);
+ assert(T->isDependentType() && "Class template type is not dependent?");
+ (void)T;
+
+ // Set the access specifier.
+ SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
+
+ // Set the lexical context of these templates
+ NewClass->setLexicalDeclContext(CurContext);
+ NewTemplate->setLexicalDeclContext(CurContext);
+
+ if (TK == TK_Definition)
+ NewClass->startDefinition();
+
+ if (Attr)
+ ProcessDeclAttributeList(NewClass, Attr);
+
+ PushOnScopeChains(NewTemplate, S);
+
+ if (Invalid) {
+ NewTemplate->setInvalidDecl();
+ NewClass->setInvalidDecl();
+ }
+ return DeclPtrTy::make(NewTemplate);
+}
+
+/// \brief Checks the validity of a template parameter list, possibly
+/// considering the template parameter list from a previous
+/// declaration.
+///
+/// If an "old" template parameter list is provided, it must be
+/// equivalent (per TemplateParameterListsAreEqual) to the "new"
+/// template parameter list.
+///
+/// \param NewParams Template parameter list for a new template
+/// declaration. This template parameter list will be updated with any
+/// default arguments that are carried through from the previous
+/// template parameter list.
+///
+/// \param OldParams If provided, template parameter list from a
+/// previous declaration of the same template. Default template
+/// arguments will be merged from the old template parameter list to
+/// the new template parameter list.
+///
+/// \returns true if an error occurred, false otherwise.
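+///
+/// A hypothetical illustration (not from this patch) of the merging done
+/// here:
+/// \code
+///   template<typename T = int> class X;  // declares a default argument
+///   template<typename T> class X { };    // 'T = int' is merged in here
+///   template<typename T = int> class X;  // error: redefined default argument
+/// \endcode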
+bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
+ TemplateParameterList *OldParams) {
+ bool Invalid = false;
+
+ // C++ [temp.param]p10:
+ // The set of default template-arguments available for use with a
+ // template declaration or definition is obtained by merging the
+ // default arguments from the definition (if in scope) and all
+ // declarations in scope in the same way default function
+ // arguments are (8.3.6).
+ bool SawDefaultArgument = false;
+ SourceLocation PreviousDefaultArgLoc;
+
+ // Dummy initialization to avoid warnings.
+ TemplateParameterList::iterator OldParam = NewParams->end();
+ if (OldParams)
+ OldParam = OldParams->begin();
+
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ // Variables used to diagnose redundant default arguments
+ bool RedundantDefaultArg = false;
+ SourceLocation OldDefaultLoc;
+ SourceLocation NewDefaultLoc;
+
+ // Variables used to diagnose missing default arguments
+ bool MissingDefaultArg = false;
+
+ // Merge default arguments for template type parameters.
+ if (TemplateTypeParmDecl *NewTypeParm
+ = dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
+ TemplateTypeParmDecl *OldTypeParm
+ = OldParams? cast<TemplateTypeParmDecl>(*OldParam) : 0;
+
+ if (OldTypeParm && OldTypeParm->hasDefaultArgument() &&
+ NewTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ NewTypeParm->setDefaultArgument(OldTypeParm->getDefaultArgument(),
+ OldTypeParm->getDefaultArgumentLoc(),
+ true);
+ PreviousDefaultArgLoc = OldTypeParm->getDefaultArgumentLoc();
+ } else if (NewTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ }
+ // Merge default arguments for non-type template parameters
+ else if (NonTypeTemplateParmDecl *NewNonTypeParm
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
+ NonTypeTemplateParmDecl *OldNonTypeParm
+ = OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : 0;
+ if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
+ NewNonTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ // FIXME: We need to create a new kind of "default argument"
+ // expression that points to a previous non-type template
+ // parameter.
+ NewNonTypeParm->setDefaultArgument(
+ OldNonTypeParm->getDefaultArgument());
+ PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ } else if (NewNonTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ }
+ // Merge default arguments for template template parameters
+ else {
+ TemplateTemplateParmDecl *NewTemplateParm
+ = cast<TemplateTemplateParmDecl>(*NewParam);
+ TemplateTemplateParmDecl *OldTemplateParm
+ = OldParams? cast<TemplateTemplateParmDecl>(*OldParam) : 0;
+ if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
+ NewTemplateParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTemplateParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewTemplateParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ // FIXME: We need to create a new kind of "default argument" expression
+ // that points to a previous template template parameter.
+ NewTemplateParm->setDefaultArgument(
+ OldTemplateParm->getDefaultArgument());
+ PreviousDefaultArgLoc = OldTemplateParm->getDefaultArgumentLoc();
+ } else if (NewTemplateParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewTemplateParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ }
+
+ if (RedundantDefaultArg) {
+ // C++ [temp.param]p12:
+ // A template-parameter shall not be given default arguments
+ // by two different declarations in the same scope.
+ Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
+ Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ } else if (MissingDefaultArg) {
+ // C++ [temp.param]p11:
+ // If a template-parameter has a default template-argument,
+ // all subsequent template-parameters shall have a default
+ // template-argument supplied.
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_default_arg_missing);
+ Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ }
+
+ // If we have an old template parameter list that we're merging
+ // in, move on to the next parameter.
+ if (OldParams)
+ ++OldParam;
+ }
+
+ return Invalid;
+}
+
+/// \brief Translates template arguments as provided by the parser
+/// into template arguments used by semantic analysis.
+static void
+translateTemplateArguments(ASTTemplateArgsPtr &TemplateArgsIn,
+ SourceLocation *TemplateArgLocs,
+ llvm::SmallVector<TemplateArgument, 16> &TemplateArgs) {
+ TemplateArgs.reserve(TemplateArgsIn.size());
+
+ void **Args = TemplateArgsIn.getArgs();
+ bool *ArgIsType = TemplateArgsIn.getArgIsType();
+ for (unsigned Arg = 0, Last = TemplateArgsIn.size(); Arg != Last; ++Arg) {
+ TemplateArgs.push_back(
+ ArgIsType[Arg]? TemplateArgument(TemplateArgLocs[Arg],
+ QualType::getFromOpaquePtr(Args[Arg]))
+ : TemplateArgument(reinterpret_cast<Expr *>(Args[Arg])));
+ }
+}
+
+/// \brief Build a canonical version of a template argument list.
+///
+/// This function builds a canonical version of the given template
+/// argument list, where each of the template arguments has been
+/// converted into its canonical form. This routine is typically used
+/// to canonicalize a template argument list when the template name
+/// itself is dependent. When the template name refers to an actual
+/// template declaration, Sema::CheckTemplateArgumentList should be
+/// used to check and canonicalize the template arguments.
+///
+/// \param TemplateArgs The incoming template arguments.
+///
+/// \param NumTemplateArgs The number of template arguments in \p
+/// TemplateArgs.
+///
+/// \param Canonical A vector to be filled with the canonical versions
+/// of the template arguments.
+///
+/// \param Context The ASTContext in which the template arguments live.
+static void CanonicalizeTemplateArguments(const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ llvm::SmallVectorImpl<TemplateArgument> &Canonical,
+ ASTContext &Context) {
+ Canonical.reserve(NumTemplateArgs);
+ for (unsigned Idx = 0; Idx < NumTemplateArgs; ++Idx) {
+ switch (TemplateArgs[Idx].getKind()) {
+ case TemplateArgument::Expression:
+ // FIXME: Build canonical expression (!)
+ Canonical.push_back(TemplateArgs[Idx]);
+ break;
+
+ case TemplateArgument::Declaration:
+ Canonical.push_back(
+ TemplateArgument(SourceLocation(),
+ Context.getCanonicalDecl(TemplateArgs[Idx].getAsDecl())));
+ break;
+
+ case TemplateArgument::Integral:
+ Canonical.push_back(TemplateArgument(SourceLocation(),
+ *TemplateArgs[Idx].getAsIntegral(),
+ TemplateArgs[Idx].getIntegralType()));
+ break;
+
+ case TemplateArgument::Type: {
+ QualType CanonType
+ = Context.getCanonicalType(TemplateArgs[Idx].getAsType());
+ Canonical.push_back(TemplateArgument(SourceLocation(), CanonType));
+ }
+ }
+ }
+}
+
+QualType Sema::CheckTemplateIdType(TemplateName Name,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceLocation RAngleLoc) {
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (!Template) {
+ // The template name does not resolve to a template, so we just
+ // build a dependent template-id type.
+
+ // Canonicalize the template arguments to build the canonical
+ // template-id type.
+ llvm::SmallVector<TemplateArgument, 16> CanonicalTemplateArgs;
+ CanonicalizeTemplateArguments(TemplateArgs, NumTemplateArgs,
+ CanonicalTemplateArgs, Context);
+
+ TemplateName CanonName = Context.getCanonicalTemplateName(Name);
+ QualType CanonType
+ = Context.getTemplateSpecializationType(CanonName,
+ &CanonicalTemplateArgs[0],
+ CanonicalTemplateArgs.size());
+
+ // Build the dependent template-id type.
+ return Context.getTemplateSpecializationType(Name, TemplateArgs,
+ NumTemplateArgs, CanonType);
+ }
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ if (CheckTemplateArgumentList(Template, TemplateLoc, LAngleLoc,
+ TemplateArgs, NumTemplateArgs, RAngleLoc,
+ ConvertedTemplateArgs))
+ return QualType();
+
+ assert((ConvertedTemplateArgs.size() ==
+ Template->getTemplateParameters()->size()) &&
+ "Converted template argument list is too short!");
+
+ QualType CanonType;
+
+ if (TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs,
+ NumTemplateArgs)) {
+ // This class template specialization is a dependent
+ // type. Therefore, its canonical type is another class template
+ // specialization type that contains all of the converted
+ // arguments in canonical form. This ensures that, e.g., A<T> and
+ // A<T, T> have identical types when A is declared as:
+ //
+ // template<typename T, typename U = T> struct A;
+ TemplateName CanonName = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(CanonName,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size());
+ } else if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Template)) {
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ llvm::FoldingSetNodeID ID;
+ ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size());
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *Decl
+ = ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+ if (!Decl) {
+ // This is the first time we have referenced this class template
+ // specialization. Create the canonical declaration and add it to
+ // the set of specializations.
+ Decl = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getDeclContext(),
+ TemplateLoc,
+ ClassTemplate,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size(),
+ 0);
+ ClassTemplate->getSpecializations().InsertNode(Decl, InsertPos);
+ Decl->setLexicalDeclContext(CurContext);
+ }
+
+ CanonType = Context.getTypeDeclType(Decl);
+ }
+
+ // Build the fully-sugared type for this class template
+ // specialization, which refers back to the class template
+ // specialization we created or found.
+ return Context.getTemplateSpecializationType(Name, TemplateArgs,
+ NumTemplateArgs, CanonType);
+}
+
+Action::TypeResult
+Sema::ActOnTemplateIdType(TemplateTy TemplateD, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc) {
+ TemplateName Template = TemplateD.getAsVal<TemplateName>();
+
+ // Translate the parser's template argument list into our AST format.
+ llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
+ translateTemplateArguments(TemplateArgsIn, TemplateArgLocs, TemplateArgs);
+
+ QualType Result = CheckTemplateIdType(Template, TemplateLoc, LAngleLoc,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ RAngleLoc);
+ TemplateArgsIn.release();
+
+ if (Result.isNull())
+ return true;
+
+ return Result.getAsOpaquePtr();
+}
+
+/// \brief Form a dependent template name.
+///
+/// This action forms a dependent template name given the template
+/// name and its (presumably dependent) scope specifier. For
+/// example, given "MetaFun::template apply", the scope specifier \p
+/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
+/// of the "template" keyword, and "apply" is the \p Name.
+Sema::TemplateTy
+Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return TemplateTy();
+
+ NestedNameSpecifier *Qualifier
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+
+ // FIXME: member of the current instantiation
+
+ if (!Qualifier->isDependent()) {
+ // C++0x [temp.names]p5:
+ // If a name prefixed by the keyword template is not the name of
+ // a template, the program is ill-formed. [Note: the keyword
+ // template may not be applied to non-template members of class
+ // templates. -end note ] [ Note: as is the case with the
+ // typename prefix, the template prefix is allowed in cases
+ // where it is not strictly necessary; i.e., when the
+ // nested-name-specifier or the expression on the left of the ->
+ // or . is not dependent on a template-parameter, or the use
+ // does not appear in the scope of a template. -end note]
+ //
+ // Note: C++03 was more strict here, because it banned the use of
+ // the "template" keyword prior to a template-name that was not a
+ // dependent name. C++ DR468 relaxed this requirement (the
+ // "template" keyword is now permitted). We follow the C++0x
+ // rules, even in C++03 mode, retroactively applying the DR.
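+ //
+ // For illustration (hypothetical names): given "struct Meta {
+ // template<typename T> struct apply; };", writing "Meta::template
+ // apply" is merely redundant here, while "Meta::template NotATemplate"
+ // triggers the diagnostic below when the name does not refer to a
+ // template.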
+ TemplateTy Template;
+ TemplateNameKind TNK = isTemplateName(Name, 0, Template, &SS);
+ if (TNK == TNK_Non_template) {
+ Diag(NameLoc, diag::err_template_kw_refers_to_non_template)
+ << &Name;
+ return TemplateTy();
+ }
+
+ return Template;
+ }
+
+ return TemplateTy::make(Context.getDependentTemplateName(Qualifier, &Name));
+}
+
+/// \brief Check that the given template argument list is well-formed
+/// for specializing the given template.
+bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceLocation RAngleLoc,
+ llvm::SmallVectorImpl<TemplateArgument> &Converted) {
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ unsigned NumParams = Params->size();
+ unsigned NumArgs = NumTemplateArgs;
+ bool Invalid = false;
+
+ if (NumArgs > NumParams ||
+ NumArgs < Params->getMinRequiredArguments()) {
+ // FIXME: point at either the first arg beyond what we can handle,
+ // or the '>', depending on whether we have too many or too few
+ // arguments.
+ SourceRange Range;
+ if (NumArgs > NumParams)
+ Range = SourceRange(TemplateArgs[NumParams].getLocation(), RAngleLoc);
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << (NumArgs > NumParams)
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template << Range;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ Invalid = true;
+ }
+
+ // C++ [temp.arg]p1:
+ // [...] The type and form of each template-argument specified in
+ // a template-id shall match the type and form specified for the
+ // corresponding parameter declared by the template in its
+ // template-parameter-list.
+ unsigned ArgIdx = 0;
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param, ++ArgIdx) {
+ // Decode the template argument
+ TemplateArgument Arg;
+ if (ArgIdx >= NumArgs) {
+ // Retrieve the default template argument from the template
+ // parameter.
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ if (!TTP->hasDefaultArgument())
+ break;
+
+ QualType ArgType = TTP->getDefaultArgument();
+
+ // If the argument type is dependent, instantiate it now based
+ // on the previously-computed template arguments.
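+ // For instance (hypothetical names), given
+ //   template<typename T, typename U = T*> struct X;
+ // the reference X<int> arrives here with U's default argument "T*",
+ // which is instantiated against the already-converted {T = int} to
+ // yield "int*".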
+ if (ArgType->isDependentType()) {
+ InstantiatingTemplate Inst(*this, TemplateLoc,
+ Template, &Converted[0],
+ Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ TemplateArgumentList TemplateArgs(Context, &Converted[0],
+ Converted.size(),
+ /*CopyArgs=*/false);
+ ArgType = InstantiateType(ArgType, TemplateArgs,
+ TTP->getDefaultArgumentLoc(),
+ TTP->getDeclName());
+ }
+
+ if (ArgType.isNull())
+ return true;
+
+ Arg = TemplateArgument(TTP->getLocation(), ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (!NTTP->hasDefaultArgument())
+ break;
+
+ // FIXME: Instantiate default argument
+ Arg = TemplateArgument(NTTP->getDefaultArgument());
+ } else {
+ TemplateTemplateParmDecl *TempParm
+ = cast<TemplateTemplateParmDecl>(*Param);
+
+ if (!TempParm->hasDefaultArgument())
+ break;
+
+ // FIXME: Instantiate default argument
+ Arg = TemplateArgument(TempParm->getDefaultArgument());
+ }
+ } else {
+ // Retrieve the template argument produced by the user.
+ Arg = TemplateArgs[ArgIdx];
+ }
+
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ // Check template type parameters.
+ if (Arg.getKind() == TemplateArgument::Type) {
+ if (CheckTemplateArgument(TTP, Arg.getAsType(), Arg.getLocation()))
+ Invalid = true;
+
+ // Add the converted template type argument.
+ Converted.push_back(
+ TemplateArgument(Arg.getLocation(),
+ Context.getCanonicalType(Arg.getAsType())));
+ continue;
+ }
+
+ // C++ [temp.arg.type]p1:
+ // A template-argument for a template-parameter which is a
+ // type shall be a type-id.
+
+ // We have a template type parameter but the template argument
+ // is not a type.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_type);
+ Diag((*Param)->getLocation(), diag::note_template_param_here);
+ Invalid = true;
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ // Check non-type template parameters.
+
+ // Instantiate the type of the non-type template parameter with
+ // the template arguments we've seen thus far.
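+ // For example (hypothetical names), in
+ //   template<typename T, T Value> struct C;
+ // the parameter Value has the dependent type T, which is instantiated
+ // to "int" using the converted arguments before checking C<int, 5>.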
+ QualType NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ // Instantiate the type of the non-type template parameter.
+ InstantiatingTemplate Inst(*this, TemplateLoc,
+ Template, &Converted[0],
+ Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ TemplateArgumentList TemplateArgs(Context, &Converted[0],
+ Converted.size(),
+ /*CopyArgs=*/false);
+ NTTPType = InstantiateType(NTTPType, TemplateArgs,
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ // If that worked, check the non-type template parameter type
+ // for validity.
+ if (!NTTPType.isNull())
+ NTTPType = CheckNonTypeTemplateParameterType(NTTPType,
+ NTTP->getLocation());
+
+ if (NTTPType.isNull()) {
+ Invalid = true;
+ break;
+ }
+ }
+
+ switch (Arg.getKind()) {
+ case TemplateArgument::Expression: {
+ Expr *E = Arg.getAsExpr();
+ if (CheckTemplateArgument(NTTP, NTTPType, E, &Converted))
+ Invalid = true;
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ // We've already checked this template argument, so just copy
+ // it to the list of converted arguments.
+ Converted.push_back(Arg);
+ break;
+
+ case TemplateArgument::Type:
+ // We have a non-type template parameter but the template
+ // argument is a type.
+
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and
+ // an expression is resolved to a type-id, regardless of the
+ // form of the corresponding template-parameter.
+ //
+ // We issue a specific diagnostic for this case, since it can be
+ // rather confusing for users.
+ if (Arg.getAsType()->isFunctionType())
+ Diag(Arg.getLocation(), diag::err_template_arg_nontype_ambig)
+ << Arg.getAsType();
+ else
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr);
+ Diag((*Param)->getLocation(), diag::note_template_param_here);
+ Invalid = true;
+ }
+ } else {
+ // Check template template parameters.
+ TemplateTemplateParmDecl *TempParm
+ = cast<TemplateTemplateParmDecl>(*Param);
+
+ switch (Arg.getKind()) {
+ case TemplateArgument::Expression: {
+ Expr *ArgExpr = Arg.getAsExpr();
+ if (ArgExpr && isa<DeclRefExpr>(ArgExpr) &&
+ isa<TemplateDecl>(cast<DeclRefExpr>(ArgExpr)->getDecl())) {
+ if (CheckTemplateArgument(TempParm, cast<DeclRefExpr>(ArgExpr)))
+ Invalid = true;
+
+ // Add the converted template argument.
+ Decl *D
+ = Context.getCanonicalDecl(cast<DeclRefExpr>(ArgExpr)->getDecl());
+ Converted.push_back(TemplateArgument(Arg.getLocation(), D));
+ continue;
+ }
+ }
+ // fall through
+
+ case TemplateArgument::Type: {
+ // We have a template template parameter but the template
+ // argument does not refer to a template.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_template);
+ Invalid = true;
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ // We've already checked this template argument, so just copy
+ // it to the list of converted arguments.
+ Converted.push_back(Arg);
+ break;
+
+ case TemplateArgument::Integral:
+ assert(false && "Integral argument with template template parameter");
+ break;
+ }
+ }
+ }
+
+ return Invalid;
+}
+
+/// \brief Check a template argument against its corresponding
+/// template type parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.type]. It
+/// returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
+ QualType Arg, SourceLocation ArgLoc) {
+ // C++ [temp.arg.type]p2:
+ // A local type, a type with no linkage, an unnamed type or a type
+ // compounded from any of these types shall not be used as a
+ // template-argument for a template type-parameter.
+ //
+ // FIXME: Perform the recursive and no-linkage type checks.
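+ //
+ // For example (hypothetical names), given "template<typename T> struct
+ // X;", the use "void f() { struct Local { }; X<Local> x; }" names a
+ // local type and is rejected below, as is an unnamed struct or enum
+ // that has no typedef name.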
+ const TagType *Tag = 0;
+ if (const EnumType *EnumT = Arg->getAsEnumType())
+ Tag = EnumT;
+ else if (const RecordType *RecordT = Arg->getAsRecordType())
+ Tag = RecordT;
+ if (Tag && Tag->getDecl()->getDeclContext()->isFunctionOrMethod())
+ return Diag(ArgLoc, diag::err_template_arg_local_type)
+ << QualType(Tag, 0);
+ else if (Tag && !Tag->getDecl()->getDeclName() &&
+ !Tag->getDecl()->getTypedefForAnonDecl()) {
+ Diag(ArgLoc, diag::err_template_arg_unnamed_type);
+ Diag(Tag->getDecl()->getLocation(), diag::note_template_unnamed_type_here);
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Checks whether the given template argument is the address
+/// of an object or function according to C++ [temp.arg.nontype]p1.
+bool Sema::CheckTemplateArgumentAddressOfObjectOrFunction(Expr *Arg,
+ NamedDecl *&Entity) {
+ bool Invalid = false;
+
+ // See through any implicit casts we added to fix the type.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = Cast->getSubExpr();
+
+ // C++0x allows nullptr, and there's no further checking to be done for that.
+ if (Arg->getType()->isNullPtrType())
+ return false;
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- the address of an object or function with external
+ // linkage, including function templates and function
+ // template-ids but excluding non-static class members,
+ // expressed as & id-expression where the & is optional if
+ // the name refers to a function or array, or if the
+ // corresponding template-parameter is a reference; or
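+ //
+ // For example (hypothetical names), given "int obj; void func();" and
+ // "template<int *P, void (*F)()> struct X;", both X<&obj, &func> and
+ // X<&obj, func> are accepted here, while extra parentheses as in
+ // X<(&obj), func> are diagnosed below.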
+ DeclRefExpr *DRE = 0;
+
+ // Ignore (and complain about) any excess parentheses.
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ Invalid = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (UnOp->getOpcode() == UnaryOperator::AddrOf)
+ DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
+ } else
+ DRE = dyn_cast<DeclRefExpr>(Arg);
+
+ if (!DRE || !isa<ValueDecl>(DRE->getDecl()))
+ return Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_object_or_func_form)
+ << Arg->getSourceRange();
+
+ // Cannot refer to non-static data members
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(DRE->getDecl()))
+ return Diag(Arg->getSourceRange().getBegin(), diag::err_template_arg_field)
+ << Field << Arg->getSourceRange();
+
+ // Cannot refer to non-static member functions
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(DRE->getDecl()))
+ if (!Method->isStatic())
+ return Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_method)
+ << Method << Arg->getSourceRange();
+
+ // Functions must have external linkage.
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(DRE->getDecl())) {
+ if (Func->getStorageClass() == FunctionDecl::Static) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_function_not_extern)
+ << Func << Arg->getSourceRange();
+ Diag(Func->getLocation(), diag::note_template_arg_internal_object)
+ << true;
+ return true;
+ }
+
+ // Okay: we've named a function with external linkage.
+ Entity = Func;
+ return Invalid;
+ }
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!Var->hasGlobalStorage()) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_object_not_extern)
+ << Var << Arg->getSourceRange();
+ Diag(Var->getLocation(), diag::note_template_arg_internal_object)
+ << true;
+ return true;
+ }
+
+ // Okay: we've named an object with external linkage
+ Entity = Var;
+ return Invalid;
+ }
+
+ // We found something else, but we don't know specifically what it is.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_object_or_func)
+ << Arg->getSourceRange();
+ Diag(DRE->getDecl()->getLocation(),
+ diag::note_template_arg_refers_here);
+ return true;
+}
+
+/// \brief Checks whether the given template argument is a pointer to
+/// member constant according to C++ [temp.arg.nontype]p1.
+bool
+Sema::CheckTemplateArgumentPointerToMember(Expr *Arg, NamedDecl *&Member) {
+ bool Invalid = false;
+
+ // See through any implicit casts we added to fix the type.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = Cast->getSubExpr();
+
+ // C++0x allows nullptr, and there's no further checking to be done for that.
+ if (Arg->getType()->isNullPtrType())
+ return false;
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- a pointer to member expressed as described in 5.3.1.
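+ //
+ // That is, for "struct S { int n; };" (hypothetical names) the
+ // argument must be written as a qualified address such as "&S::n";
+ // extra parentheses are diagnosed below, and anything not of this form
+ // is rejected.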
+ QualifiedDeclRefExpr *DRE = 0;
+
+ // Ignore (and complain about) any excess parentheses.
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ Invalid = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg))
+ if (UnOp->getOpcode() == UnaryOperator::AddrOf)
+ DRE = dyn_cast<QualifiedDeclRefExpr>(UnOp->getSubExpr());
+
+ if (!DRE)
+ return Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+
+ if (isa<FieldDecl>(DRE->getDecl()) || isa<CXXMethodDecl>(DRE->getDecl())) {
+ assert((isa<FieldDecl>(DRE->getDecl()) ||
+ !cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
+ "Only non-static member pointers can make it here");
+
+ // Okay: this is the address of a non-static member, and therefore
+ // a member pointer constant.
+ Member = DRE->getDecl();
+ return Invalid;
+ }
+
+ // We found something else, but we don't know specifically what it is.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+ Diag(DRE->getDecl()->getLocation(),
+ diag::note_template_arg_refers_here);
+ return true;
+}
+
+/// \brief Check a template argument against its corresponding
+/// non-type template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.nontype].
+/// It returns true if an error occurred, and false otherwise. \p
+/// InstantiatedParamType is the type of the non-type template
+/// parameter after it has been instantiated.
+///
+/// If Converted is non-NULL and no errors occur, the value
+/// of this argument will be added to the end of the Converted vector.
+bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
+ QualType InstantiatedParamType, Expr *&Arg,
+ llvm::SmallVectorImpl<TemplateArgument> *Converted) {
+ SourceLocation StartLoc = Arg->getSourceRange().getBegin();
+
+ // If either the parameter has a dependent type or the argument is
+ // type-dependent, there's nothing we can check now.
+ if (InstantiatedParamType->isDependentType() || Arg->isTypeDependent()) {
+ // FIXME: Produce a cloned, canonical expression?
+ if (Converted)
+ Converted->push_back(TemplateArgument(Arg));
+ return false;
+ }
+
+ // C++ [temp.arg.nontype]p5:
+ // The following conversions are performed on each expression used
+ // as a non-type template-argument. If a non-type
+ // template-argument cannot be converted to the type of the
+ // corresponding template-parameter then the program is
+ // ill-formed.
+ //
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, integral promotions (4.5) and integral
+ // conversions (4.7) are applied.
+ QualType ParamType = InstantiatedParamType;
+ QualType ArgType = Arg->getType();
+ if (ParamType->isIntegralType() || ParamType->isEnumeralType()) {
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of:
+ //
+ // -- an integral constant-expression of integral or enumeration
+ // type; or
+ // -- the name of a non-type template-parameter; or
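+ //
+ // For example (hypothetical names), with "template<unsigned N> struct
+ // A;", A<5 + 3> is accepted as an integral constant expression, while
+ // A<n> for a non-constant "int n" is rejected below.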
+ SourceLocation NonConstantLoc;
+ llvm::APSInt Value;
+ if (!ArgType->isIntegralType() && !ArgType->isEnumeralType()) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_integral_or_enumeral)
+ << ArgType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ } else if (!Arg->isValueDependent() &&
+ !Arg->isIntegerConstantExpr(Value, Context, &NonConstantLoc)) {
+ Diag(NonConstantLoc, diag::err_template_arg_not_ice)
+ << ArgType << Arg->getSourceRange();
+ return true;
+ }
+
+ // FIXME: We need some way to more easily get the unqualified form
+ // of the types without going all the way to the
+ // canonical type.
+ if (Context.getCanonicalType(ParamType).getCVRQualifiers())
+ ParamType = Context.getCanonicalType(ParamType).getUnqualifiedType();
+ if (Context.getCanonicalType(ArgType).getCVRQualifiers())
+ ArgType = Context.getCanonicalType(ArgType).getUnqualifiedType();
+
+ // Try to convert the argument to the parameter's type.
+ if (ParamType == ArgType) {
+ // Okay: no conversion necessary
+ } else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
+ !ParamType->isEnumeralType()) {
+ // This is an integral promotion or conversion.
+ ImpCastExprToType(Arg, ParamType);
+ } else {
+ // We can't perform this conversion.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ QualType IntegerType = Context.getCanonicalType(ParamType);
+ if (const EnumType *Enum = IntegerType->getAsEnumType())
+ IntegerType = Enum->getDecl()->getIntegerType();
+
+ if (!Arg->isValueDependent()) {
+ // Check that an unsigned parameter does not receive a negative
+ // value.
+ if (IntegerType->isUnsignedIntegerType()
+ && (Value.isSigned() && Value.isNegative())) {
+ Diag(Arg->getSourceRange().getBegin(), diag::err_template_arg_negative)
+ << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // Check that we don't overflow the template parameter type.
+ unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ if (Value.getActiveBits() > AllowedBits) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_too_large)
+ << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ if (Value.getBitWidth() != AllowedBits)
+ Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(IntegerType->isSignedIntegerType());
+ }
+
+ if (Converted) {
+ // Add the value of this argument to the list of converted
+ // arguments. We use the bitwidth and signedness of the template
+ // parameter.
+ if (Arg->isValueDependent()) {
+ // The argument is value-dependent. Create a new
+ // TemplateArgument with the converted expression.
+ Converted->push_back(TemplateArgument(Arg));
+ return false;
+ }
+
+ Converted->push_back(TemplateArgument(StartLoc, Value,
+ ParamType->isEnumeralType() ? ParamType : IntegerType));
+ }
+
+ return false;
+ }
+
+ // Handle pointer-to-function, reference-to-function, and
+ // pointer-to-member-function all in (roughly) the same way.
+ if (// -- For a non-type template-parameter of type pointer to
+ // function, only the function-to-pointer conversion (4.3) is
+ // applied. If the template-argument represents a set of
+ // overloaded functions (or a pointer to such), the matching
+ // function is selected from the set (13.4).
+ // In C++0x, any std::nullptr_t value can be converted.
+ (ParamType->isPointerType() &&
+ ParamType->getAsPointerType()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type reference to
+ // function, no conversions apply. If the template-argument
+ // represents a set of overloaded functions, the matching
+ // function is selected from the set (13.4).
+ (ParamType->isReferenceType() &&
+ ParamType->getAsReferenceType()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type pointer to
+ // member function, no conversions apply. If the
+ // template-argument represents a set of overloaded member
+ // functions, the matching member function is selected from
+ // the set (13.4).
+ // Again, C++0x allows a std::nullptr_t value.
+ (ParamType->isMemberPointerType() &&
+ ParamType->getAsMemberPointerType()->getPointeeType()
+ ->isFunctionType())) {
+ if (Context.hasSameUnqualifiedType(ArgType,
+ ParamType.getNonReferenceType())) {
+ // We don't have to do anything: the types already match.
+ } else if (ArgType->isNullPtrType() && (ParamType->isPointerType() ||
+ ParamType->isMemberPointerType())) {
+ ArgType = ParamType;
+ ImpCastExprToType(Arg, ParamType);
+ } else if (ArgType->isFunctionType() && ParamType->isPointerType()) {
+ ArgType = Context.getPointerType(ArgType);
+ ImpCastExprToType(Arg, ArgType);
+ } else if (FunctionDecl *Fn
+ = ResolveAddressOfOverloadedFunction(Arg, ParamType, true)) {
+ if (DiagnoseUseOfDecl(Fn, Arg->getSourceRange().getBegin()))
+ return true;
+
+ FixOverloadedFunctionReference(Arg, Fn);
+ ArgType = Arg->getType();
+ if (ArgType->isFunctionType() && ParamType->isPointerType()) {
+ ArgType = Context.getPointerType(Arg->getType());
+ ImpCastExprToType(Arg, ArgType);
+ }
+ }
+
+ if (!Context.hasSameUnqualifiedType(ArgType,
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ if (ParamType->isMemberPointerType()) {
+ NamedDecl *Member = 0;
+ if (CheckTemplateArgumentPointerToMember(Arg, Member))
+ return true;
+
+ if (Converted) {
+ Member = cast_or_null<NamedDecl>(Context.getCanonicalDecl(Member));
+ Converted->push_back(TemplateArgument(StartLoc, Member));
+ }
+
+ return false;
+ }
+
+ NamedDecl *Entity = 0;
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(Arg, Entity))
+ return true;
+
+ if (Converted) {
+ Entity = cast_or_null<NamedDecl>(Context.getCanonicalDecl(Entity));
+ Converted->push_back(TemplateArgument(StartLoc, Entity));
+ }
+ return false;
+ }
+
+ if (ParamType->isPointerType()) {
+ // -- for a non-type template-parameter of type pointer to
+ // object, qualification conversions (4.4) and the
+ // array-to-pointer conversion (4.2) are applied.
+ // C++0x also allows a value of std::nullptr_t.
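+ //
+ // For example (hypothetical names), with "extern int array[5];" and
+ // "template<int *P> struct X;", X<array> relies on the array-to-pointer
+ // decay performed here.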
+ assert(ParamType->getAsPointerType()->getPointeeType()->isObjectType() &&
+ "Only object pointers allowed here");
+
+ if (ArgType->isNullPtrType()) {
+ ArgType = ParamType;
+ ImpCastExprToType(Arg, ParamType);
+ } else if (ArgType->isArrayType()) {
+ ArgType = Context.getArrayDecayedType(ArgType);
+ ImpCastExprToType(Arg, ArgType);
+ }
+
+ if (IsQualificationConversion(ArgType, ParamType)) {
+ ArgType = ParamType;
+ ImpCastExprToType(Arg, ParamType);
+ }
+
+ if (!Context.hasSameUnqualifiedType(ArgType, ParamType)) {
+ // We can't perform this conversion.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ NamedDecl *Entity = 0;
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(Arg, Entity))
+ return true;
+
+ if (Converted) {
+ Entity = cast_or_null<NamedDecl>(Context.getCanonicalDecl(Entity));
+ Converted->push_back(TemplateArgument(StartLoc, Entity));
+ }
+
+ return false;
+ }
+
+ if (const ReferenceType *ParamRefType = ParamType->getAsReferenceType()) {
+ // -- For a non-type template-parameter of type reference to
+ // object, no conversions apply. The type referred to by the
+ // reference may be more cv-qualified than the (otherwise
+ // identical) type of the template-argument. The
+ // template-parameter is bound directly to the
+ // template-argument, which must be an lvalue.
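+ //
+ // For example (hypothetical names), with "int obj;" and
+ // "template<const int &R> struct X;", X<obj> binds R directly to obj;
+ // an argument that is more cv-qualified than the parameter refers to
+ // is diagnosed below.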
+ assert(ParamRefType->getPointeeType()->isObjectType() &&
+ "Only object references allowed here");
+
+ if (!Context.hasSameUnqualifiedType(ParamRefType->getPointeeType(), ArgType)) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_no_ref_bind)
+ << InstantiatedParamType << Arg->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ unsigned ParamQuals
+ = Context.getCanonicalType(ParamType).getCVRQualifiers();
+ unsigned ArgQuals = Context.getCanonicalType(ArgType).getCVRQualifiers();
+
+ if ((ParamQuals | ArgQuals) != ParamQuals) {
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_ref_bind_ignores_quals)
+ << InstantiatedParamType << Arg->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ NamedDecl *Entity = 0;
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(Arg, Entity))
+ return true;
+
+ if (Converted) {
+ Entity = cast<NamedDecl>(Context.getCanonicalDecl(Entity));
+ Converted->push_back(TemplateArgument(StartLoc, Entity));
+ }
+
+ return false;
+ }
+
+ // -- For a non-type template-parameter of type pointer to data
+ // member, qualification conversions (4.4) are applied.
+ // C++0x allows std::nullptr_t values.
+ assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
+
+ if (Context.hasSameUnqualifiedType(ParamType, ArgType)) {
+ // Types match exactly: nothing more to do here.
+ } else if (ArgType->isNullPtrType()) {
+ ImpCastExprToType(Arg, ParamType);
+ } else if (IsQualificationConversion(ArgType, ParamType)) {
+ ImpCastExprToType(Arg, ParamType);
+ } else {
+ // We can't perform this conversion.
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ NamedDecl *Member = 0;
+ if (CheckTemplateArgumentPointerToMember(Arg, Member))
+ return true;
+
+ if (Converted) {
+ Member = cast_or_null<NamedDecl>(Context.getCanonicalDecl(Member));
+ Converted->push_back(TemplateArgument(StartLoc, Member));
+ }
+
+ return false;
+}
+
+/// \brief Check a template argument against its corresponding
+/// template template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.template].
+/// It returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
+ DeclRefExpr *Arg) {
+ assert(isa<TemplateDecl>(Arg->getDecl()) && "Only template decls allowed");
+ TemplateDecl *Template = cast<TemplateDecl>(Arg->getDecl());
+
+ // C++ [temp.arg.template]p1:
+ // A template-argument for a template template-parameter shall be
+ // the name of a class template, expressed as id-expression. Only
+ // primary class templates are considered when matching the
+ // template template argument with the corresponding parameter;
+ // partial specializations are not considered even if their
+ // parameter lists match that of the template template parameter.
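+ //
+ // For example (hypothetical names), given
+ //   template<typename> struct A;
+ //   template<template<typename> class TT> struct X;
+ // X<A> is well-formed, while an argument whose template parameter list
+ // does not match TT's is diagnosed by the comparison below.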
+ if (!isa<ClassTemplateDecl>(Template)) {
+ assert(isa<FunctionTemplateDecl>(Template) &&
+ "Only function templates are possible here");
+ Diag(Arg->getSourceRange().getBegin(),
+ diag::note_template_arg_refers_here_func)
+ << Template;
+ }
+
+ return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
+ Param->getTemplateParameters(),
+ true, true,
+ Arg->getSourceRange().getBegin());
+}
+
+/// \brief Determine whether the given template parameter lists are
+/// equivalent.
+///
+/// \param New The new template parameter list, typically written in the
+/// source code as part of a new template declaration.
+///
+/// \param Old The old template parameter list, typically found via
+/// name lookup of the template declared with this template parameter
+/// list.
+///
+/// \param Complain If true, this routine will produce a diagnostic if
+/// the template parameter lists are not equivalent.
+///
+/// \param IsTemplateTemplateParm If true, this routine is being
+/// called to compare the template parameter lists of a template
+/// template parameter.
+///
+/// \param TemplateArgLoc If this source location is valid, then we
+/// are actually checking the template parameter list of a template
+/// argument (New) against the template parameter list of its
+/// corresponding template template parameter (Old). We produce
+/// slightly different diagnostics in this scenario.
+///
+/// \returns True if the template parameter lists are equal, false
+/// otherwise.
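+///
+/// For example, "template<typename T, int N>" and "template<class U,
+/// int M>" compare equal here, since parameter names are irrelevant,
+/// while "template<typename T>" and "template<int N>" differ in
+/// parameter kind and do not.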
+bool
+Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
+ TemplateParameterList *Old,
+ bool Complain,
+ bool IsTemplateTemplateParm,
+ SourceLocation TemplateArgLoc) {
+ if (Old->size() != New->size()) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_param_list_different_arity;
+ if (TemplateArgLoc.isValid()) {
+ Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_list_different_arity;
+ }
+ Diag(New->getTemplateLoc(), NextDiag)
+ << (New->size() > Old->size())
+ << IsTemplateTemplateParm
+ << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
+ Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
+ << IsTemplateTemplateParm
+ << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
+ }
+
+ return false;
+ }
+
+ for (TemplateParameterList::iterator OldParm = Old->begin(),
+ OldParmEnd = Old->end(), NewParm = New->begin();
+ OldParm != OldParmEnd; ++OldParm, ++NewParm) {
+ if ((*OldParm)->getKind() != (*NewParm)->getKind()) {
+ unsigned NextDiag = diag::err_template_param_different_kind;
+ if (TemplateArgLoc.isValid()) {
+ Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_different_kind;
+ }
+ Diag((*NewParm)->getLocation(), NextDiag)
+ << IsTemplateTemplateParm;
+ Diag((*OldParm)->getLocation(), diag::note_template_prev_declaration)
+ << IsTemplateTemplateParm;
+ return false;
+ }
+
+ if (isa<TemplateTypeParmDecl>(*OldParm)) {
+ // Okay; all template type parameters are equivalent (since we
+ // know we're at the same index).
+#if 0
+ // FIXME: Enable this code in debug mode *after* we properly go through
+ // and "instantiate" the template parameter lists of template template
+ // parameters. It's only after this instantiation that (1) any dependent
+ // types within the template parameter list of the template template
+ // parameter can be checked, and (2) the template type parameter depths
+ // will match up.
+ QualType OldParmType
+ = Context.getTypeDeclType(cast<TemplateTypeParmDecl>(*OldParm));
+ QualType NewParmType
+ = Context.getTypeDeclType(cast<TemplateTypeParmDecl>(*NewParm));
+ assert(Context.getCanonicalType(OldParmType) ==
+ Context.getCanonicalType(NewParmType) &&
+ "type parameter mismatch?");
+#endif
+ } else if (NonTypeTemplateParmDecl *OldNTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*OldParm)) {
+ // The types of non-type template parameters must agree.
+ NonTypeTemplateParmDecl *NewNTTP
+ = cast<NonTypeTemplateParmDecl>(*NewParm);
+ if (Context.getCanonicalType(OldNTTP->getType()) !=
+ Context.getCanonicalType(NewNTTP->getType())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_nontype_parm_different_type;
+ if (TemplateArgLoc.isValid()) {
+ Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_nontype_parm_different_type;
+ }
+ Diag(NewNTTP->getLocation(), NextDiag)
+ << NewNTTP->getType()
+ << IsTemplateTemplateParm;
+ Diag(OldNTTP->getLocation(),
+ diag::note_template_nontype_parm_prev_declaration)
+ << OldNTTP->getType();
+ }
+ return false;
+ }
+ } else {
+ // The template parameter lists of template template
+ // parameters must agree.
+ // FIXME: Could we perform a faster "type" comparison here?
+ assert(isa<TemplateTemplateParmDecl>(*OldParm) &&
+ "Only template template parameters handled here");
+ TemplateTemplateParmDecl *OldTTP
+ = cast<TemplateTemplateParmDecl>(*OldParm);
+ TemplateTemplateParmDecl *NewTTP
+ = cast<TemplateTemplateParmDecl>(*NewParm);
+ if (!TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
+ OldTTP->getTemplateParameters(),
+ Complain,
+ /*IsTemplateTemplateParm=*/true,
+ TemplateArgLoc))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Check whether a template can be declared within this scope.
+///
+/// If the template declaration is valid in this scope, returns
+/// false. Otherwise, issues a diagnostic and returns true.
+bool
+Sema::CheckTemplateDeclScope(Scope *S,
+ MultiTemplateParamsArg &TemplateParameterLists) {
+ assert(TemplateParameterLists.size() > 0 && "Not a template");
+
+ // Find the nearest enclosing declaration scope.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ TemplateParameterList *TemplateParams =
+ static_cast<TemplateParameterList*>(*TemplateParameterLists.get());
+ SourceLocation TemplateLoc = TemplateParams->getTemplateLoc();
+ SourceRange TemplateRange
+ = SourceRange(TemplateLoc, TemplateParams->getRAngleLoc());
+
+ // C++ [temp]p2:
+ // A template-declaration can appear only as a namespace scope or
+ // class scope declaration.
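+ //
+ // For example, a template declared at block scope (inside a function
+ // body) is rejected here; namespace-scope and class-scope template
+ // declarations are accepted.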
+ DeclContext *Ctx = static_cast<DeclContext *>(S->getEntity());
+ while (Ctx && isa<LinkageSpecDecl>(Ctx)) {
+ if (cast<LinkageSpecDecl>(Ctx)->getLanguage() != LinkageSpecDecl::lang_cxx)
+ return Diag(TemplateLoc, diag::err_template_linkage)
+ << TemplateRange;
+
+ Ctx = Ctx->getParent();
+ }
+
+ if (Ctx && (Ctx->isFileContext() || Ctx->isRecord()))
+ return false;
+
+ return Diag(TemplateLoc, diag::err_template_outside_namespace_or_class_scope)
+ << TemplateRange;
+}
+
+/// \brief Check whether a class template specialization or explicit
+/// instantiation in the current context is well-formed.
+///
+/// This routine determines whether a class template specialization or
+/// explicit instantiation can be declared in the current context
+/// (C++ [temp.expl.spec]p2, C++0x [temp.explicit]p2) and emits
+/// appropriate diagnostics if there was an error. It returns true if
+/// there was an error that we cannot recover from, and false otherwise.
+bool
+Sema::CheckClassTemplateSpecializationScope(ClassTemplateDecl *ClassTemplate,
+ ClassTemplateSpecializationDecl *PrevDecl,
+ SourceLocation TemplateNameLoc,
+ SourceRange ScopeSpecifierRange,
+ bool ExplicitInstantiation) {
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace
+ // of which the template is a member, or, for member templates, in
+ // the namespace of which the enclosing class or enclosing class
+ // template is a member. An explicit specialization of a member
+ // function, member class or static data member of a class
+ // template shall be declared in the namespace of which the class
+ // template is a member. Such a declaration may also be a
+ // definition. If the declaration is not a definition, the
+ // specialization may be defined later in the namespace in which
+ // the explicit specialization was declared, or in a namespace
+ // that encloses the one in which the explicit specialization was
+ // declared.
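+ //
+ // For example (hypothetical names), given "namespace N {
+ // template<typename T> struct X; }", the first declaration of an
+ // explicit specialization of X must appear in namespace N; declaring
+ // it in an unrelated context is diagnosed below.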
+ if (CurContext->getLookupContext()->isFunctionOrMethod()) {
+ Diag(TemplateNameLoc, diag::err_template_spec_decl_function_scope)
+ << ExplicitInstantiation << ClassTemplate;
+ return true;
+ }
+
+ DeclContext *DC = CurContext->getEnclosingNamespaceContext();
+ DeclContext *TemplateContext
+ = ClassTemplate->getDeclContext()->getEnclosingNamespaceContext();
+ if ((!PrevDecl || PrevDecl->getSpecializationKind() == TSK_Undeclared) &&
+ !ExplicitInstantiation) {
+ // There is no prior declaration of this entity, so this
+ // specialization must be in the same context as the template
+ // itself.
+ if (DC != TemplateContext) {
+ if (isa<TranslationUnitDecl>(TemplateContext))
+ Diag(TemplateNameLoc, diag::err_template_spec_decl_out_of_scope_global)
+ << ClassTemplate << ScopeSpecifierRange;
+ else if (isa<NamespaceDecl>(TemplateContext))
+ Diag(TemplateNameLoc, diag::err_template_spec_decl_out_of_scope)
+ << ClassTemplate << cast<NamedDecl>(TemplateContext)
+ << ScopeSpecifierRange;
+
+ Diag(ClassTemplate->getLocation(), diag::note_template_decl_here);
+ }
+
+ return false;
+ }
+
+ // We have a previous declaration of this entity. Make sure that
+ // this redeclaration (or definition) occurs in an enclosing namespace.
+ if (!CurContext->Encloses(TemplateContext)) {
+ // FIXME: In C++98, we would like to turn these errors into warnings,
+ // dependent on a -Wc++0x flag.
+ bool SuppressedDiag = false;
+ if (isa<TranslationUnitDecl>(TemplateContext)) {
+ if (!ExplicitInstantiation || getLangOptions().CPlusPlus0x)
+ Diag(TemplateNameLoc, diag::err_template_spec_redecl_global_scope)
+ << ExplicitInstantiation << ClassTemplate << ScopeSpecifierRange;
+ else
+ SuppressedDiag = true;
+ } else if (isa<NamespaceDecl>(TemplateContext)) {
+ if (!ExplicitInstantiation || getLangOptions().CPlusPlus0x)
+ Diag(TemplateNameLoc, diag::err_template_spec_redecl_out_of_scope)
+ << ExplicitInstantiation << ClassTemplate
+ << cast<NamedDecl>(TemplateContext) << ScopeSpecifierRange;
+ else
+ SuppressedDiag = true;
+ }
+
+ if (!SuppressedDiag)
+ Diag(ClassTemplate->getLocation(), diag::note_template_decl_here);
+ }
+
+ return false;
+}
+
+Sema::DeclResult
+Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagKind TK,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy TemplateD,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists) {
+ // Find the class template we're specializing
+ TemplateName Name = TemplateD.getAsVal<TemplateName>();
+ ClassTemplateDecl *ClassTemplate
+ = cast<ClassTemplateDecl>(Name.getAsTemplateDecl());
+
+ bool isPartialSpecialization = false;
+
+ // Check the validity of the template headers that introduce this
+ // template.
+ // FIXME: Once we have member templates, we'll need to check
+ // C++ [temp.expl.spec]p17-18, where we could have multiple levels of
+ // template<> headers.
+ if (TemplateParameterLists.size() == 0)
+ Diag(KWLoc, diag::err_template_spec_needs_header)
+ << CodeModificationHint::CreateInsertion(KWLoc, "template<> ");
+ else {
+ TemplateParameterList *TemplateParams
+ = static_cast<TemplateParameterList*>(*TemplateParameterLists.get());
+ if (TemplateParameterLists.size() > 1) {
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_spec_extra_headers);
+ return true;
+ }
+
+ // FIXME: We'll need more checks here!
+ if (TemplateParams->size() > 0)
+ isPartialSpecialization = true;
+ }
+
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagDecl::TagKind Kind;
+ switch (TagSpec) {
+ default: assert(0 && "Unknown tag type!");
+ case DeclSpec::TST_struct: Kind = TagDecl::TK_struct; break;
+ case DeclSpec::TST_union: Kind = TagDecl::TK_union; break;
+ case DeclSpec::TST_class: Kind = TagDecl::TK_class; break;
+ }
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, KWLoc,
+ *ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << CodeModificationHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // Translate the parser's template argument list into our AST format.
+ llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
+ translateTemplateArguments(TemplateArgsIn, TemplateArgLocs, TemplateArgs);
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, LAngleLoc,
+ &TemplateArgs[0], TemplateArgs.size(),
+ RAngleLoc, ConvertedTemplateArgs))
+ return true;
+
+ assert((ConvertedTemplateArgs.size() ==
+ ClassTemplate->getTemplateParameters()->size()) &&
+ "Converted template argument list is too short!");
+
+ // Find the class template (partial) specialization declaration that
+ // corresponds to these arguments.
+ llvm::FoldingSetNodeID ID;
+ if (isPartialSpecialization)
+ // FIXME: Template parameter list matters, too
+ ClassTemplatePartialSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size());
+ else
+ ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size());
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *PrevDecl = 0;
+
+ if (isPartialSpecialization)
+ PrevDecl
+ = ClassTemplate->getPartialSpecializations().FindNodeOrInsertPos(ID,
+ InsertPos);
+ else
+ PrevDecl
+ = ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+
+ ClassTemplateSpecializationDecl *Specialization = 0;
+
+ // Check whether we can declare a class template specialization in
+ // the current scope.
+ if (CheckClassTemplateSpecializationScope(ClassTemplate, PrevDecl,
+ TemplateNameLoc,
+ SS.getRange(),
+ /*ExplicitInstantiation=*/false))
+ return true;
+
+ if (PrevDecl && PrevDecl->getSpecializationKind() == TSK_Undeclared) {
+ // Since the only prior class template specialization with these
+ // arguments was referenced but not declared, reuse that
+ // declaration node as our own, updating its source location to
+ // reflect our new declaration.
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ PrevDecl = 0;
+ } else if (isPartialSpecialization) {
+ // FIXME: extra checking for partial specializations
+
+ // Create a new class template partial specialization declaration node.
+ TemplateParameterList *TemplateParams
+ = static_cast<TemplateParameterList*>(*TemplateParameterLists.get());
+ ClassTemplatePartialSpecializationDecl *PrevPartial
+ = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
+ ClassTemplatePartialSpecializationDecl *Partial
+ = ClassTemplatePartialSpecializationDecl::Create(Context,
+ ClassTemplate->getDeclContext(),
+ TemplateNameLoc,
+ TemplateParams,
+ ClassTemplate,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size(),
+ PrevPartial);
+
+ if (PrevPartial) {
+ ClassTemplate->getPartialSpecializations().RemoveNode(PrevPartial);
+ ClassTemplate->getPartialSpecializations().GetOrInsertNode(Partial);
+ } else {
+ ClassTemplate->getPartialSpecializations().InsertNode(Partial, InsertPos);
+ }
+ Specialization = Partial;
+ } else {
+ // Create a new class template specialization declaration node for
+ // this explicit specialization.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getDeclContext(),
+ TemplateNameLoc,
+ ClassTemplate,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size(),
+ PrevDecl);
+
+ if (PrevDecl) {
+ ClassTemplate->getSpecializations().RemoveNode(PrevDecl);
+ ClassTemplate->getSpecializations().GetOrInsertNode(Specialization);
+ } else {
+ ClassTemplate->getSpecializations().InsertNode(Specialization,
+ InsertPos);
+ }
+ }
+
+ // Note that this is an explicit specialization.
+ Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ // Check that this isn't a redefinition of this specialization.
+ if (TK == TK_Definition) {
+ if (RecordDecl *Def = Specialization->getDefinition(Context)) {
+ // FIXME: Should also handle explicit specialization after implicit
+ // instantiation with a special diagnostic.
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_redefinition)
+ << Context.getTypeDeclType(Specialization) << Range;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ Specialization->setInvalidDecl();
+ return true;
+ }
+ }
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ QualType WrittenTy
+ = Context.getTemplateSpecializationType(Name,
+ &TemplateArgs[0],
+ TemplateArgs.size(),
+ Context.getTypeDeclType(Specialization));
+ Specialization->setTypeAsWritten(WrittenTy);
+ TemplateArgsIn.release();
+
+ // C++ [temp.expl.spec]p9:
+ // A template explicit specialization is in the scope of the
+ // namespace in which the template was defined.
+ //
+ // We actually implement this paragraph where we set the semantic
+ // context (in the creation of the ClassTemplateSpecializationDecl),
+ // but we also maintain the lexical context where the actual
+ // definition occurs.
+ Specialization->setLexicalDeclContext(CurContext);
+
+ // We may be starting the definition of this specialization.
+ if (TK == TK_Definition)
+ Specialization->startDefinition();
+
+ // Add the specialization into its lexical context, so that it can
+ // be seen when iterating through the list of declarations in that
+ // context. However, specializations are not found by name lookup.
+ CurContext->addDecl(Context, Specialization);
+ return DeclPtrTy::make(Specialization);
+}
+
+// Explicit instantiation of a class template specialization
+Sema::DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy TemplateD,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation *TemplateArgLocs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr) {
+ // Find the class template we're specializing
+ TemplateName Name = TemplateD.getAsVal<TemplateName>();
+ ClassTemplateDecl *ClassTemplate
+ = cast<ClassTemplateDecl>(Name.getAsTemplateDecl());
+
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagDecl::TagKind Kind;
+ switch (TagSpec) {
+ default: assert(0 && "Unknown tag type!");
+ case DeclSpec::TST_struct: Kind = TagDecl::TK_struct; break;
+ case DeclSpec::TST_union: Kind = TagDecl::TK_union; break;
+ case DeclSpec::TST_class: Kind = TagDecl::TK_class; break;
+ }
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, KWLoc,
+ *ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << CodeModificationHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ if (CheckClassTemplateSpecializationScope(ClassTemplate, 0,
+ TemplateNameLoc,
+ SS.getRange(),
+ /*ExplicitInstantiation=*/true))
+ return true;
+
+ // Translate the parser's template argument list into our AST format.
+ llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
+ translateTemplateArguments(TemplateArgsIn, TemplateArgLocs, TemplateArgs);
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ llvm::SmallVector<TemplateArgument, 16> ConvertedTemplateArgs;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, LAngleLoc,
+ &TemplateArgs[0], TemplateArgs.size(),
+ RAngleLoc, ConvertedTemplateArgs))
+ return true;
+
+ assert((ConvertedTemplateArgs.size() ==
+ ClassTemplate->getTemplateParameters()->size()) &&
+ "Converted template argument list is too short!");
+
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ llvm::FoldingSetNodeID ID;
+ ClassTemplateSpecializationDecl::Profile(ID, &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size());
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *PrevDecl
+ = ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+
+ ClassTemplateSpecializationDecl *Specialization = 0;
+
+ bool SpecializationRequiresInstantiation = true;
+ if (PrevDecl) {
+ if (PrevDecl->getSpecializationKind() == TSK_ExplicitInstantiation) {
+ // This particular specialization has already been declared or
+ // instantiated. We cannot explicitly instantiate it.
+ Diag(TemplateNameLoc, diag::err_explicit_instantiation_duplicate)
+ << Context.getTypeDeclType(PrevDecl);
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_explicit_instantiation);
+ return DeclPtrTy::make(PrevDecl);
+ }
+
+ if (PrevDecl->getSpecializationKind() == TSK_ExplicitSpecialization) {
+ // C++ DR 259, C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit
+ // instantiation of a template appears after a declaration of
+ // an explicit specialization for that template, the explicit
+ // instantiation has no effect.
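+ //
+ // For example:
+ //   template<typename T> struct X { };
+ //   template<> struct X<int> { };  // explicit specialization
+ //   template struct X<int>;        // no effect (diagnosed below in
+ //                                  // C++98 mode)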
+ if (!getLangOptions().CPlusPlus0x) {
+ Diag(TemplateNameLoc,
+ diag::ext_explicit_instantiation_after_specialization)
+ << Context.getTypeDeclType(PrevDecl);
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_template_specialization);
+ }
+
+ // Create a new class template specialization declaration node
+ // for this explicit instantiation. This node is only used to
+ // record the existence of this explicit instantiation for
+ // accurate reproduction of the source code; we don't actually
+ // use it for anything, since it is semantically irrelevant.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getDeclContext(),
+ TemplateNameLoc,
+ ClassTemplate,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size(),
+ 0);
+ Specialization->setLexicalDeclContext(CurContext);
+ CurContext->addDecl(Context, Specialization);
+ return DeclPtrTy::make(Specialization);
+ }
+
+ // If we have already (implicitly) instantiated this
+ // specialization, there is less work to do.
+ if (PrevDecl->getSpecializationKind() == TSK_ImplicitInstantiation)
+ SpecializationRequiresInstantiation = false;
+
+ // Since the only prior class template specialization with these
+ // arguments was referenced but not declared, reuse that
+ // declaration node as our own, updating its source location to
+ // reflect our new declaration.
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ PrevDecl = 0;
+ } else {
+ // Create a new class template specialization declaration node for
+ // this explicit instantiation.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getDeclContext(),
+ TemplateNameLoc,
+ ClassTemplate,
+ &ConvertedTemplateArgs[0],
+ ConvertedTemplateArgs.size(),
+ 0);
+
+ ClassTemplate->getSpecializations().InsertNode(Specialization,
+ InsertPos);
+ }
+
+ // Build the fully-sugared type for this explicit instantiation as
+ // the user wrote in the explicit instantiation itself. This means
+ // that we'll pretty-print the type retrieved from the
+ // specialization's declaration the way that the user actually wrote
+ // the explicit instantiation, rather than formatting the name based
+ // on the "canonical" representation used to store the template
+ // arguments in the specialization.
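+ //
+ // For example, given 'typedef int MyInt;' and 'template struct X<MyInt>;',
+ // the type as written is X<MyInt>, even though its canonical form is X<int>.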
+ QualType WrittenTy
+ = Context.getTemplateSpecializationType(Name,
+ &TemplateArgs[0],
+ TemplateArgs.size(),
+ Context.getTypeDeclType(Specialization));
+ Specialization->setTypeAsWritten(WrittenTy);
+ TemplateArgsIn.release();
+
+ // Add the explicit instantiation into its lexical context. However,
+ // since explicit instantiations are never found by name lookup, we
+ // just put it into the declaration context directly.
+ Specialization->setLexicalDeclContext(CurContext);
+ CurContext->addDecl(Context, Specialization);
+
+ // C++ [temp.explicit]p3:
+ // A definition of a class template or class member template
+ // shall be in scope at the point of the explicit instantiation of
+ // the class template or class member template.
+ //
+ // This check comes when we actually try to perform the
+ // instantiation.
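+ //
+ // For example:
+ //   template<typename T> struct X;  // declared but not defined
+ //   template struct X<int>;         // error: no definition of X in scope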
+ if (SpecializationRequiresInstantiation)
+ InstantiateClassTemplateSpecialization(Specialization, true);
+ else // Instantiate the members of this class template specialization.
+ InstantiateClassTemplateSpecializationMembers(TemplateLoc, Specialization);
+
+ return DeclPtrTy::make(Specialization);
+}
+
+// Explicit instantiation of a member class of a class template.
+Sema::DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr) {
+
+ bool Owned = false;
+ DeclPtrTy TagD = ActOnTag(S, TagSpec, Action::TK_Reference,
+ KWLoc, SS, Name, NameLoc, Attr, AS_none, Owned);
+ if (!TagD)
+ return true;
+
+ TagDecl *Tag = cast<TagDecl>(TagD.getAs<Decl>());
+ if (Tag->isEnum()) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_enum)
+ << Context.getTypeDeclType(Tag);
+ return true;
+ }
+
+ if (Tag->isInvalidDecl())
+ return true;
+
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag);
+ CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
+ if (!Pattern) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
+ << Context.getTypeDeclType(Record);
+ Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
+ return true;
+ }
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ if (getLangOptions().CPlusPlus0x) {
+ // FIXME: In C++98, we would like to turn these errors into warnings,
+ // dependent on a -Wc++0x flag.
+ DeclContext *PatternContext
+ = Pattern->getDeclContext()->getEnclosingNamespaceContext();
+ if (!CurContext->Encloses(PatternContext)) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_out_of_scope)
+ << Record << cast<NamedDecl>(PatternContext) << SS.getRange();
+ Diag(Pattern->getLocation(), diag::note_previous_declaration);
+ }
+ }
+
+ if (!Record->getDefinition(Context)) {
+ // If the pattern of this member class has a definition, instantiate the
+ // member class from it (and all of its members, recursively).
+ Pattern = cast_or_null<CXXRecordDecl>(Pattern->getDefinition(Context));
+ if (Pattern && InstantiateClass(TemplateLoc, Record, Pattern,
+ getTemplateInstantiationArgs(Record),
+ /*ExplicitInstantiation=*/true))
+ return true;
+ } else // Instantiate all of the members of the class.
+ InstantiateClassMembers(TemplateLoc, Record,
+ getTemplateInstantiationArgs(Record));
+
+ // FIXME: We don't have any representation for explicit instantiations of
+ // member classes. Such a representation is not needed for compilation, but it
+ // should be available for clients that want to see all of the declarations in
+ // the source code.
+ return TagD;
+}
+
+Sema::TypeResult
+Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
+ const IdentifierInfo &II, SourceLocation IdLoc) {
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ if (!NNS)
+ return true;
+
+ QualType T = CheckTypenameType(NNS, II, SourceRange(TypenameLoc, IdLoc));
+ if (T.isNull())
+ return true;
+ return T.getAsOpaquePtr();
+}
+
+Sema::TypeResult
+Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS,
+ SourceLocation TemplateLoc, TypeTy *Ty) {
+ QualType T = QualType::getFromOpaquePtr(Ty);
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ const TemplateSpecializationType *TemplateId
+ = T->getAsTemplateSpecializationType();
+ assert(TemplateId && "Expected a template specialization type");
+
+ if (NNS->isDependent())
+ return Context.getTypenameType(NNS, TemplateId).getAsOpaquePtr();
+
+ return Context.getQualifiedNameType(NNS, T).getAsOpaquePtr();
+}
+
+/// \brief Build the type that describes a C++ typename specifier,
+/// e.g., "typename T::type".
+QualType
+Sema::CheckTypenameType(NestedNameSpecifier *NNS, const IdentifierInfo &II,
+ SourceRange Range) {
+ CXXRecordDecl *CurrentInstantiation = 0;
+ if (NNS->isDependent()) {
+ CurrentInstantiation = getCurrentInstantiationOf(NNS);
+
+ // If the nested-name-specifier does not refer to the current
+ // instantiation, then build a typename type.
+ if (!CurrentInstantiation)
+ return Context.getTypenameType(NNS, &II);
+ }
+
+ DeclContext *Ctx = 0;
+
+ if (CurrentInstantiation)
+ Ctx = CurrentInstantiation;
+ else {
+ CXXScopeSpec SS;
+ SS.setScopeRep(NNS);
+ SS.setRange(Range);
+ if (RequireCompleteDeclContext(SS))
+ return QualType();
+
+ Ctx = computeDeclContext(SS);
+ }
+ assert(Ctx && "No declaration context?");
+
+ DeclarationName Name(&II);
+ LookupResult Result = LookupQualifiedName(Ctx, Name, LookupOrdinaryName,
+ false);
+ unsigned DiagID = 0;
+ Decl *Referenced = 0;
+ switch (Result.getKind()) {
+ case LookupResult::NotFound:
+ if (Ctx->isTranslationUnit())
+ DiagID = diag::err_typename_nested_not_found_global;
+ else
+ DiagID = diag::err_typename_nested_not_found;
+ break;
+
+ case LookupResult::Found:
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getAsDecl())) {
+ // We found a type. Build a QualifiedNameType, since the
+ // typename-specifier was just sugar. FIXME: Tell
+ // QualifiedNameType that it has a "typename" prefix.
+ return Context.getQualifiedNameType(NNS, Context.getTypeDeclType(Type));
+ }
+
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = Result.getAsDecl();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = *Result.begin();
+ break;
+
+ case LookupResult::AmbiguousBaseSubobjectTypes:
+ case LookupResult::AmbiguousBaseSubobjects:
+ case LookupResult::AmbiguousReference:
+ DiagnoseAmbiguousLookup(Result, Name, Range.getEnd(), Range);
+ return QualType();
+ }
+
+ // If we get here, it's because name lookup did not find a
+ // type. Emit an appropriate diagnostic and return an error.
+ if (NamedDecl *NamedCtx = dyn_cast<NamedDecl>(Ctx))
+ Diag(Range.getEnd(), DiagID) << Range << Name << NamedCtx;
+ else
+ Diag(Range.getEnd(), DiagID) << Range << Name;
+ if (Referenced)
+ Diag(Referenced->getLocation(), diag::note_typename_refers_here)
+ << Name;
+ return QualType();
+}
+
+// FIXME: Move to SemaTemplateDeduction.cpp
+bool
+Sema::DeduceTemplateArguments(QualType Param, QualType Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ // We only want to look at the canonical types, since typedefs and
+ // sugar are not part of template argument deduction.
+ Param = Context.getCanonicalType(Param);
+ Arg = Context.getCanonicalType(Arg);
+
+ // If the parameter type is not dependent, just compare the types
+ // directly.
+ if (!Param->isDependentType())
+ return Param == Arg;
+
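+ // Otherwise, decompose the types and deduce recursively. For example,
+ // matching the parameter type 'T*' against the argument type 'const int*'
+ // recurses on the pointee types and deduces T = 'const int'.
+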
+ // FIXME: Use a visitor or switch to handle all of the kinds of
+ // types that the parameter may be.
+ if (const TemplateTypeParmType *TemplateTypeParm
+ = Param->getAsTemplateTypeParmType()) {
+ (void)TemplateTypeParm; // FIXME: use this
+ // The argument type cannot be less qualified than the parameter
+ // type.
+ if (Param.isMoreQualifiedThan(Arg))
+ return false;
+
+ unsigned Quals = Arg.getCVRQualifiers() & ~Param.getCVRQualifiers();
+ QualType DeducedType = Arg.getQualifiedType(Quals);
+ // FIXME: actually save the deduced type, and check that this
+ // deduction is consistent.
+ return true;
+ }
+
+ if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
+ return false;
+
+ if (const PointerType *PointerParam = Param->getAsPointerType()) {
+ const PointerType *PointerArg = Arg->getAsPointerType();
+ if (!PointerArg)
+ return false;
+
+ return DeduceTemplateArguments(PointerParam->getPointeeType(),
+ PointerArg->getPointeeType(),
+ Deduced);
+ }
+
+ // FIXME: Many more cases to go.
+ return false;
+}
+
+bool
+Sema::DeduceTemplateArguments(const TemplateArgument &Param,
+ const TemplateArgument &Arg,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ assert(Param.getKind() == Arg.getKind() &&
+ "Template argument kind mismatch during deduction");
+ switch (Param.getKind()) {
+ case TemplateArgument::Type:
+ return DeduceTemplateArguments(Param.getAsType(), Arg.getAsType(),
+ Deduced);
+
+ default:
+ return false;
+ }
+}
+
+bool
+Sema::DeduceTemplateArguments(const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ llvm::SmallVectorImpl<TemplateArgument> &Deduced) {
+ assert(ParamList.size() == ArgList.size());
+ for (unsigned I = 0, N = ParamList.size(); I != N; ++I) {
+ if (!DeduceTemplateArguments(ParamList[I], ArgList[I], Deduced))
+ return false;
+ }
+ return true;
+}
+
+
+bool
+Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs) {
+ llvm::SmallVector<TemplateArgument, 4> Deduced;
+ Deduced.resize(Partial->getTemplateParameters()->size());
+ return DeduceTemplateArguments(Partial->getTemplateArgs(), TemplateArgs,
+ Deduced);
+}
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
new file mode 100644
index 0000000..d3d771b
--- /dev/null
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -0,0 +1,1034 @@
+//===------- SemaTemplateInstantiate.cpp - C++ Template Instantiation ------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements C++ template instantiation.
+//
+//===----------------------------------------------------------------------===/
+
+#include "Sema.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===/
+// Template Instantiation Support
+//===----------------------------------------------------------------------===/
+
+/// \brief Retrieve the template argument list that should be used to
+/// instantiate the given declaration.
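+ ///
+ /// For example, for a member function of the class template specialization
+ /// X<int>, these are the arguments of the enclosing specialization, <int>.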
+const TemplateArgumentList &
+Sema::getTemplateInstantiationArgs(NamedDecl *D) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D))
+ return Spec->getTemplateArgs();
+
+ DeclContext *EnclosingTemplateCtx = D->getDeclContext();
+ while (!isa<ClassTemplateSpecializationDecl>(EnclosingTemplateCtx)) {
+ assert(!EnclosingTemplateCtx->isFileContext() &&
+ "Tried to get the instantiation arguments of a non-template");
+ EnclosingTemplateCtx = EnclosingTemplateCtx->getParent();
+ }
+
+ ClassTemplateSpecializationDecl *EnclosingTemplate
+ = cast<ClassTemplateSpecializationDecl>(EnclosingTemplateCtx);
+ return EnclosingTemplate->getTemplateArgs();
+}
+
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ Decl *Entity,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef) {
+
+ Invalid = CheckInstantiationDepth(PointOfInstantiation,
+ InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::TemplateInstantiation;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Entity);
+ Inst.TemplateArgs = 0;
+ Inst.NumTemplateArgs = 0;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ Invalid = false;
+ }
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
+ SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef) {
+
+ Invalid = CheckInstantiationDepth(PointOfInstantiation,
+ InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind
+ = ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Template);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ Invalid = false;
+ }
+}
+
+void Sema::InstantiatingTemplate::Clear() {
+ if (!Invalid) {
+ SemaRef.ActiveTemplateInstantiations.pop_back();
+ Invalid = true;
+ }
+}
+
+bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
+ SourceLocation PointOfInstantiation,
+ SourceRange InstantiationRange) {
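+ // Guard against runaway recursive instantiations such as
+ //   template<typename T> struct Loop { Loop<T*> Member; };
+ // by bounding the depth of the active-instantiation stack with
+ // LangOptions::InstantiationDepth.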
+ if (SemaRef.ActiveTemplateInstantiations.size()
+ <= SemaRef.getLangOptions().InstantiationDepth)
+ return false;
+
+ SemaRef.Diag(PointOfInstantiation,
+ diag::err_template_recursion_depth_exceeded)
+ << SemaRef.getLangOptions().InstantiationDepth
+ << InstantiationRange;
+ SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth)
+ << SemaRef.getLangOptions().InstantiationDepth;
+ return true;
+}
+
+/// \brief Prints the current instantiation stack through a series of
+/// notes.
+void Sema::PrintInstantiationStack() {
+ for (llvm::SmallVector<ActiveTemplateInstantiation, 16>::reverse_iterator
+ Active = ActiveTemplateInstantiations.rbegin(),
+ ActiveEnd = ActiveTemplateInstantiations.rend();
+ Active != ActiveEnd;
+ ++Active) {
+ switch (Active->Kind) {
+ case ActiveTemplateInstantiation::TemplateInstantiation: {
+ Decl *D = reinterpret_cast<Decl *>(Active->Entity);
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ unsigned DiagID = diag::note_template_member_class_here;
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ DiagID = diag::note_template_class_instantiation_here;
+ Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ DiagID)
+ << Context.getTypeDeclType(Record)
+ << Active->InstantiationRange;
+ } else {
+ FunctionDecl *Function = cast<FunctionDecl>(D);
+ unsigned DiagID = diag::note_template_member_function_here;
+ // FIXME: check for a function template
+ Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ DiagID)
+ << Function
+ << Active->InstantiationRange;
+ }
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation: {
+ TemplateDecl *Template = cast<TemplateDecl>((Decl *)Active->Entity);
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ Active->TemplateArgs,
+ Active->NumTemplateArgs,
+ Context.PrintingPolicy);
+ Diags.Report(FullSourceLoc(Active->PointOfInstantiation, SourceMgr),
+ diag::note_default_arg_instantiation_here)
+ << (Template->getNameAsString() + TemplateArgsStr)
+ << Active->InstantiationRange;
+ break;
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===/
+// Template Instantiation for Types
+//===----------------------------------------------------------------------===/
+namespace {
+ class VISIBILITY_HIDDEN TemplateTypeInstantiator {
+ Sema &SemaRef;
+ const TemplateArgumentList &TemplateArgs;
+ SourceLocation Loc;
+ DeclarationName Entity;
+
+ public:
+ TemplateTypeInstantiator(Sema &SemaRef,
+ const TemplateArgumentList &TemplateArgs,
+ SourceLocation Loc,
+ DeclarationName Entity)
+ : SemaRef(SemaRef), TemplateArgs(TemplateArgs),
+ Loc(Loc), Entity(Entity) { }
+
+ QualType operator()(QualType T) const { return Instantiate(T); }
+
+ QualType Instantiate(QualType T) const;
+
+ // Declare instantiate functions for each type.
+#define TYPE(Class, Base) \
+ QualType Instantiate##Class##Type(const Class##Type *T, \
+ unsigned Quals) const;
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateExtQualType(const ExtQualType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate ExtQualType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateBuiltinType(const BuiltinType *T,
+ unsigned Quals) const {
+ assert(false && "Builtin types are not dependent and cannot be instantiated");
+ return QualType(T, Quals);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateFixedWidthIntType(const FixedWidthIntType *T, unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate FixedWidthIntType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateComplexType(const ComplexType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate ComplexType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiatePointerType(const PointerType *T,
+ unsigned Quals) const {
+ QualType PointeeType = Instantiate(T->getPointeeType());
+ if (PointeeType.isNull())
+ return QualType();
+
+ return SemaRef.BuildPointerType(PointeeType, Quals, Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateBlockPointerType(const BlockPointerType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate BlockPointerType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateLValueReferenceType(
+ const LValueReferenceType *T, unsigned Quals) const {
+ QualType ReferentType = Instantiate(T->getPointeeType());
+ if (ReferentType.isNull())
+ return QualType();
+
+ return SemaRef.BuildReferenceType(ReferentType, true, Quals, Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateRValueReferenceType(
+ const RValueReferenceType *T, unsigned Quals) const {
+ QualType ReferentType = Instantiate(T->getPointeeType());
+ if (ReferentType.isNull())
+ return QualType();
+
+ return SemaRef.BuildReferenceType(ReferentType, false, Quals, Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateMemberPointerType(const MemberPointerType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate MemberPointerType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateConstantArrayType(const ConstantArrayType *T,
+ unsigned Quals) const {
+ QualType ElementType = Instantiate(T->getElementType());
+ if (ElementType.isNull())
+ return ElementType;
+
+ // Build a temporary integer literal to specify the size for
+ // BuildArrayType. Since we have already checked the size as part of
+ // creating the dependent array type in the first place, we know
+ // there aren't any errors. However, we do need to determine what
+ // C++ type to give the size expression.
+ llvm::APInt Size = T->getSize();
+ QualType Types[] = {
+ SemaRef.Context.UnsignedCharTy, SemaRef.Context.UnsignedShortTy,
+ SemaRef.Context.UnsignedIntTy, SemaRef.Context.UnsignedLongTy,
+ SemaRef.Context.UnsignedLongLongTy, SemaRef.Context.UnsignedInt128Ty
+ };
+ const unsigned NumTypes = sizeof(Types) / sizeof(QualType);
+ QualType SizeType;
+ for (unsigned I = 0; I != NumTypes; ++I)
+ if (Size.getBitWidth() == SemaRef.Context.getIntWidth(Types[I])) {
+ SizeType = Types[I];
+ break;
+ }
+
+ if (SizeType.isNull())
+ SizeType = SemaRef.Context.getFixedWidthIntType(Size.getBitWidth(), false);
+
+ IntegerLiteral ArraySize(Size, SizeType, Loc);
+ return SemaRef.BuildArrayType(ElementType, T->getSizeModifier(),
+ &ArraySize, T->getIndexTypeQualifier(),
+ Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateIncompleteArrayType(const IncompleteArrayType *T,
+ unsigned Quals) const {
+ QualType ElementType = Instantiate(T->getElementType());
+ if (ElementType.isNull())
+ return ElementType;
+
+ return SemaRef.BuildArrayType(ElementType, T->getSizeModifier(),
+ 0, T->getIndexTypeQualifier(),
+ Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateVariableArrayType(const VariableArrayType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate VariableArrayType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateDependentSizedArrayType(const DependentSizedArrayType *T,
+ unsigned Quals) const {
+ Expr *ArraySize = T->getSizeExpr();
+ assert(ArraySize->isValueDependent() &&
+ "dependent sized array types must have value dependent size expr");
+
+ // Instantiate the element type if needed
+ QualType ElementType = T->getElementType();
+ if (ElementType->isDependentType()) {
+ ElementType = Instantiate(ElementType);
+ if (ElementType.isNull())
+ return QualType();
+ }
+
+ // Instantiate the size expression
+ Sema::OwningExprResult InstantiatedArraySize =
+ SemaRef.InstantiateExpr(ArraySize, TemplateArgs);
+ if (InstantiatedArraySize.isInvalid())
+ return QualType();
+
+ return SemaRef.BuildArrayType(ElementType, T->getSizeModifier(),
+ InstantiatedArraySize.takeAs<Expr>(),
+ T->getIndexTypeQualifier(), Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateVectorType(const VectorType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate VectorType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateExtVectorType(const ExtVectorType *T,
+ unsigned Quals) const {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate ExtVectorType yet");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateFunctionProtoType(const FunctionProtoType *T,
+ unsigned Quals) const {
+ QualType ResultType = Instantiate(T->getResultType());
+ if (ResultType.isNull())
+ return ResultType;
+
+ llvm::SmallVector<QualType, 4> ParamTypes;
+ for (FunctionProtoType::arg_type_iterator Param = T->arg_type_begin(),
+ ParamEnd = T->arg_type_end();
+ Param != ParamEnd; ++Param) {
+ QualType P = Instantiate(*Param);
+ if (P.isNull())
+ return P;
+
+ ParamTypes.push_back(P);
+ }
+
+ return SemaRef.BuildFunctionType(ResultType, &ParamTypes[0],
+ ParamTypes.size(),
+ T->isVariadic(), T->getTypeQuals(),
+ Loc, Entity);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateFunctionNoProtoType(const FunctionNoProtoType *T,
+ unsigned Quals) const {
+ assert(false && "Functions without prototypes cannot be dependent.");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateTypedefType(const TypedefType *T,
+ unsigned Quals) const {
+ TypedefDecl *Typedef
+ = cast_or_null<TypedefDecl>(
+ SemaRef.InstantiateCurrentDeclRef(T->getDecl()));
+ if (!Typedef)
+ return QualType();
+
+ return SemaRef.Context.getTypeDeclType(Typedef);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateTypeOfExprType(const TypeOfExprType *T,
+ unsigned Quals) const {
+ Sema::OwningExprResult E
+ = SemaRef.InstantiateExpr(T->getUnderlyingExpr(), TemplateArgs);
+ if (E.isInvalid())
+ return QualType();
+
+ return SemaRef.Context.getTypeOfExprType(E.takeAs<Expr>());
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateTypeOfType(const TypeOfType *T,
+ unsigned Quals) const {
+ QualType Underlying = Instantiate(T->getUnderlyingType());
+ if (Underlying.isNull())
+ return QualType();
+
+ return SemaRef.Context.getTypeOfType(Underlying);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateRecordType(const RecordType *T,
+ unsigned Quals) const {
+ RecordDecl *Record
+ = cast_or_null<RecordDecl>(SemaRef.InstantiateCurrentDeclRef(T->getDecl()));
+ if (!Record)
+ return QualType();
+
+ return SemaRef.Context.getTypeDeclType(Record);
+}
+
+QualType
+TemplateTypeInstantiator::InstantiateEnumType(const EnumType *T,
+ unsigned Quals) const {
+ EnumDecl *Enum
+ = cast_or_null<EnumDecl>(SemaRef.InstantiateCurrentDeclRef(T->getDecl()));
+ if (!Enum)
+ return QualType();
+
+ return SemaRef.Context.getTypeDeclType(Enum);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateTemplateTypeParmType(const TemplateTypeParmType *T,
+ unsigned Quals) const {
+ if (T->getDepth() == 0) {
+ // Replace the template type parameter with its corresponding
+ // template argument.
+ assert(TemplateArgs[T->getIndex()].getKind() == TemplateArgument::Type &&
+ "Template argument kind mismatch");
+ QualType Result = TemplateArgs[T->getIndex()].getAsType();
+ if (Result.isNull() || !Quals)
+ return Result;
+
+ // C++ [dcl.ref]p1:
+ // [...] Cv-qualified references are ill-formed except when
+ // the cv-qualifiers are introduced through the use of a
+ // typedef (7.1.3) or of a template type argument (14.3), in
+ // which case the cv-qualifiers are ignored.
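+ //
+ // For example, if T is 'int&', then 'const T' is simply 'int&'; the extra
+ // cv-qualifiers are dropped here.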
+ if (Quals && Result->isReferenceType())
+ Quals = 0;
+
+ return QualType(Result.getTypePtr(), Quals | Result.getCVRQualifiers());
+ }
+
+ // The template type parameter comes from an inner template (e.g.,
+ // the template parameter list of a member template inside the
+ // template we are instantiating). Create a new template type
+ // parameter with the template "level" reduced by one.
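+ //
+ // For example, when instantiating Outer<int> in
+ //   template<typename T> struct Outer {
+ //     template<typename U> struct Inner { };  // U has depth 1 here
+ //   };
+ // U becomes a depth-0 template type parameter within Outer<int>::Inner.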
+ return SemaRef.Context.getTemplateTypeParmType(T->getDepth() - 1,
+ T->getIndex(),
+ T->getName())
+ .getQualifiedType(Quals);
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateTemplateSpecializationType(
+ const TemplateSpecializationType *T,
+ unsigned Quals) const {
+ llvm::SmallVector<TemplateArgument, 4> InstantiatedTemplateArgs;
+ InstantiatedTemplateArgs.reserve(T->getNumArgs());
+ for (TemplateSpecializationType::iterator Arg = T->begin(), ArgEnd = T->end();
+ Arg != ArgEnd; ++Arg) {
+ switch (Arg->getKind()) {
+ case TemplateArgument::Type: {
+ QualType T = SemaRef.InstantiateType(Arg->getAsType(),
+ TemplateArgs,
+ Arg->getLocation(),
+ DeclarationName());
+ if (T.isNull())
+ return QualType();
+
+ InstantiatedTemplateArgs.push_back(
+ TemplateArgument(Arg->getLocation(), T));
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ InstantiatedTemplateArgs.push_back(*Arg);
+ break;
+
+ case TemplateArgument::Expression:
+ Sema::OwningExprResult E
+ = SemaRef.InstantiateExpr(Arg->getAsExpr(), TemplateArgs);
+ if (E.isInvalid())
+ return QualType();
+ InstantiatedTemplateArgs.push_back(E.takeAs<Expr>());
+ break;
+ }
+ }
+
+ // FIXME: We're missing the locations of the template name, '<', and '>'.
+
+ TemplateName Name = SemaRef.InstantiateTemplateName(T->getTemplateName(),
+ Loc,
+ TemplateArgs);
+
+ return SemaRef.CheckTemplateIdType(Name, Loc, SourceLocation(),
+ &InstantiatedTemplateArgs[0],
+ InstantiatedTemplateArgs.size(),
+ SourceLocation());
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateQualifiedNameType(const QualifiedNameType *T,
+ unsigned Quals) const {
+ // When we instantiate a qualified name type, there's no point in
+ // keeping the qualification around in the instantiated result. So,
+ // just instantiate the named type.
+ return (*this)(T->getNamedType());
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateTypenameType(const TypenameType *T, unsigned Quals) const {
+ if (const TemplateSpecializationType *TemplateId = T->getTemplateId()) {
+ // When the typename type refers to a template-id, the template-id
+ // is dependent and has enough information to instantiate the
+ // result of the typename type. Since we don't care about keeping
+ // the spelling of the typename type in template instantiations,
+ // we just instantiate the template-id.
+ return InstantiateTemplateSpecializationType(TemplateId, Quals);
+ }
+
+ NestedNameSpecifier *NNS
+ = SemaRef.InstantiateNestedNameSpecifier(T->getQualifier(),
+ SourceRange(Loc),
+ TemplateArgs);
+ if (!NNS)
+ return QualType();
+
+ return SemaRef.CheckTypenameType(NNS, *T->getIdentifier(), SourceRange(Loc));
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateObjCInterfaceType(const ObjCInterfaceType *T,
+ unsigned Quals) const {
+ assert(false && "Objective-C types cannot be dependent");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateObjCQualifiedInterfaceType(const ObjCQualifiedInterfaceType *T,
+ unsigned Quals) const {
+ assert(false && "Objective-C types cannot be dependent");
+ return QualType();
+}
+
+QualType
+TemplateTypeInstantiator::
+InstantiateObjCQualifiedIdType(const ObjCQualifiedIdType *T,
+ unsigned Quals) const {
+ assert(false && "Objective-C types cannot be dependent");
+ return QualType();
+}
+
+/// \brief The actual implementation of Sema::InstantiateType().
+QualType TemplateTypeInstantiator::Instantiate(QualType T) const {
+ // If T is not a dependent type, there is nothing to do.
+ if (!T->isDependentType())
+ return T;
+
+ switch (T->getTypeClass()) {
+#define TYPE(Class, Base) \
+ case Type::Class: \
+ return Instantiate##Class##Type(cast<Class##Type>(T.getTypePtr()), \
+ T.getCVRQualifiers());
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ }
+
+ assert(false && "Not all types have been decoded for instantiation");
+ return QualType();
+}
+
+/// \brief Instantiate the type T with a given set of template arguments.
+///
+/// This routine substitutes the given template arguments into the
+/// type T and produces the instantiated type.
+///
+/// \param T the type into which the template arguments will be
+/// substituted. If this type is not dependent, it will be returned
+/// immediately.
+///
+/// \param TemplateArgs the template arguments that will be
+/// substituted for the top-level template parameters within T.
+///
+/// \param Loc the location in the source code where this substitution
+/// is being performed. It will typically be the location of the
+/// declarator (if we're instantiating the type of some declaration)
+/// or the location of the type in the source code (if, e.g., we're
+/// instantiating the type of a cast expression).
+///
+/// \param Entity the name of the entity associated with a declaration
+/// being instantiated (if any). May be empty to indicate that there
+/// is no such entity (if, e.g., this is a type that occurs as part of
+/// a cast expression) or that the entity has no name (e.g., an
+/// unnamed function parameter).
+///
+/// \returns If the instantiation succeeds, the instantiated
+/// type. Otherwise, produces diagnostics and returns a NULL type.
+QualType Sema::InstantiateType(QualType T,
+ const TemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ // If T is not a dependent type, there is nothing to do.
+ if (!T->isDependentType())
+ return T;
+
+ TemplateTypeInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
+ return Instantiator(T);
+}
+
+/// \brief Instantiate the base class specifiers of the given class
+/// template specialization.
+///
+ /// Produces a diagnostic and returns true on error; otherwise, attaches the
+ /// instantiated base classes to the class template specialization and
+ /// returns false.
+bool
+Sema::InstantiateBaseSpecifiers(CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const TemplateArgumentList &TemplateArgs) {
+ bool Invalid = false;
+ llvm::SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases;
+ for (ClassTemplateSpecializationDecl::base_class_iterator
+ Base = Pattern->bases_begin(), BaseEnd = Pattern->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (!Base->getType()->isDependentType()) {
+ // FIXME: Allocate via ASTContext
+ InstantiatedBases.push_back(new CXXBaseSpecifier(*Base));
+ continue;
+ }
+
+ QualType BaseType = InstantiateType(Base->getType(),
+ TemplateArgs,
+ Base->getSourceRange().getBegin(),
+ DeclarationName());
+ if (BaseType.isNull()) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base->getSourceRange(),
+ Base->isVirtual(),
+ Base->getAccessSpecifierAsWritten(),
+ BaseType,
+ /*FIXME: Not totally accurate */
+ Base->getSourceRange().getBegin()))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ if (!Invalid &&
+ AttachBaseSpecifiers(Instantiation, InstantiatedBases.data(),
+ InstantiatedBases.size()))
+ Invalid = true;
+
+ return Invalid;
+}
+
+/// \brief Instantiate the definition of a class from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+///
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be either a class template specialization
+/// or a member class of a class template specialization.
+///
+/// \param Pattern is the pattern from which the instantiation
+/// occurs. This will be either the declaration of a class template or
+/// the declaration of a member class of a class template.
+///
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+///
+/// \returns true if an error occurred, false otherwise.
+bool
+Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
+ const TemplateArgumentList &TemplateArgs,
+ bool ExplicitInstantiation) {
+ bool Invalid = false;
+
+ CXXRecordDecl *PatternDef
+ = cast_or_null<CXXRecordDecl>(Pattern->getDefinition(Context));
+ if (!PatternDef) {
+ if (Pattern == Instantiation->getInstantiatedFromMemberClass()) {
+ Diag(PointOfInstantiation,
+ diag::err_implicit_instantiate_member_undefined)
+ << Context.getTypeDeclType(Instantiation);
+ Diag(Pattern->getLocation(), diag::note_member_of_template_here);
+ } else {
+ Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
+ << ExplicitInstantiation
+ << Context.getTypeDeclType(Instantiation);
+ Diag(Pattern->getLocation(), diag::note_template_decl_here);
+ }
+ return true;
+ }
+ Pattern = PatternDef;
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst)
+ return true;
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ DeclContext *PreviousContext = CurContext;
+ CurContext = Instantiation;
+
+ // Start the definition of this instantiation.
+ Instantiation->startDefinition();
+
+ // Instantiate the base class specifiers.
+ if (InstantiateBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
+ Invalid = true;
+
+ llvm::SmallVector<DeclPtrTy, 4> Fields;
+ for (RecordDecl::decl_iterator Member = Pattern->decls_begin(Context),
+ MemberEnd = Pattern->decls_end(Context);
+ Member != MemberEnd; ++Member) {
+ Decl *NewMember = InstantiateDecl(*Member, Instantiation, TemplateArgs);
+ if (NewMember) {
+ if (NewMember->isInvalidDecl())
+ Invalid = true;
+ else if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember))
+ Fields.push_back(DeclPtrTy::make(Field));
+ } else {
+ // FIXME: Eventually, a NULL return will mean that one of the
+ // instantiations was a semantic disaster, and we'll want to set Invalid =
+ // true. For now, we expect to skip some members that we can't yet handle.
+ }
+ }
+
+ // Finish checking fields.
+ ActOnFields(0, Instantiation->getLocation(), DeclPtrTy::make(Instantiation),
+ Fields.data(), Fields.size(), SourceLocation(), SourceLocation(),
+ 0);
+
+ // Add any implicitly-declared members that we might need.
+ AddImplicitlyDeclaredMembersToClass(Instantiation);
+
+ // Exit the scope of this instantiation.
+ CurContext = PreviousContext;
+
+ if (!Invalid)
+ Consumer.HandleTagDeclDefinition(Instantiation);
+
+ // If this is an explicit instantiation, instantiate our members, too.
+ if (!Invalid && ExplicitInstantiation) {
+ Inst.Clear();
+ InstantiateClassMembers(PointOfInstantiation, Instantiation, TemplateArgs);
+ }
+
+ return Invalid;
+}
+
+bool
+Sema::InstantiateClassTemplateSpecialization(
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ bool ExplicitInstantiation) {
+ // Perform the actual instantiation on the canonical declaration.
+ ClassTemplateSpec = cast<ClassTemplateSpecializationDecl>(
+ Context.getCanonicalDecl(ClassTemplateSpec));
+
+ // We can only instantiate something that hasn't already been
+ // instantiated or specialized. Fail without any diagnostics: our
+ // caller will provide an error message.
+ if (ClassTemplateSpec->getSpecializationKind() != TSK_Undeclared)
+ return true;
+
+ ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
+ CXXRecordDecl *Pattern = Template->getTemplatedDecl();
+ const TemplateArgumentList *TemplateArgs
+ = &ClassTemplateSpec->getTemplateArgs();
+
+ // Determine whether any class template partial specializations
+ // match the given template arguments.
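+ // For example, X<int*> can match a partial specialization
+ // 'template<typename T> struct X<T*>' with T deduced as 'int'.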
+ llvm::SmallVector<ClassTemplatePartialSpecializationDecl *, 4> Matched;
+ for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ Partial = Template->getPartialSpecializations().begin(),
+ PartialEnd = Template->getPartialSpecializations().end();
+ Partial != PartialEnd;
+ ++Partial) {
+ if (DeduceTemplateArguments(&*Partial, ClassTemplateSpec->getTemplateArgs()))
+ Matched.push_back(&*Partial);
+ }
+
+ if (Matched.size() == 1) {
+ Pattern = Matched[0];
+ // FIXME: set TemplateArgs to the template arguments of the
+ // partial specialization, instantiated with the deduced template
+ // arguments.
+ } else if (Matched.size() > 1) {
+ // FIXME: Implement partial ordering of class template partial
+ // specializations.
+ Diag(ClassTemplateSpec->getLocation(),
+ diag::unsup_template_partial_spec_ordering);
+ }
+
+ // Note that this is an instantiation.
+ ClassTemplateSpec->setSpecializationKind(
+ ExplicitInstantiation? TSK_ExplicitInstantiation
+ : TSK_ImplicitInstantiation);
+
+ return InstantiateClass(ClassTemplateSpec->getLocation(),
+ ClassTemplateSpec, Pattern, *TemplateArgs,
+ ExplicitInstantiation);
+}
+
+ /// \brief Instantiate the definitions of all of the members of the
+/// given class, which is an instantiation of a class template or a
+/// member class of a template.
+void
+Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ const TemplateArgumentList &TemplateArgs) {
+ for (DeclContext::decl_iterator D = Instantiation->decls_begin(Context),
+ DEnd = Instantiation->decls_end(Context);
+ D != DEnd; ++D) {
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(*D)) {
+ if (!Function->getBody(Context))
+ InstantiateFunctionDefinition(PointOfInstantiation, Function);
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(*D)) {
+ const VarDecl *Def = 0;
+ if (!Var->getDefinition(Def))
+ InstantiateVariableDefinition(Var);
+ } else if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(*D)) {
+ if (!Record->isInjectedClassName() && !Record->getDefinition(Context)) {
+ assert(Record->getInstantiatedFromMemberClass() &&
+ "Missing instantiated-from-template information");
+ InstantiateClass(PointOfInstantiation, Record,
+ Record->getInstantiatedFromMemberClass(),
+ TemplateArgs, true);
+ }
+ }
+ }
+}
+
+/// \brief Instantiate the definitions of all of the members of the
+/// given class template specialization, which was named as part of an
+/// explicit instantiation.
+void Sema::InstantiateClassTemplateSpecializationMembers(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec) {
+ // C++0x [temp.explicit]p7:
+ // An explicit instantiation that names a class template
+ // specialization is an explicit instantiation of the same kind
+ // (declaration or definition) of each of its members (not
+ // including members inherited from base classes) that has not
+ // been previously explicitly specialized in the translation unit
+ // containing the explicit instantiation, except as described
+ // below.
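+ //
+ // For example, 'template struct X<int>;' also instantiates the definitions
+ // of the members of X<int> that have not been explicitly specialized.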
+ InstantiateClassMembers(PointOfInstantiation, ClassTemplateSpec,
+ ClassTemplateSpec->getTemplateArgs());
+}
+
+/// \brief Instantiate a nested-name-specifier.
+NestedNameSpecifier *
+Sema::InstantiateNestedNameSpecifier(NestedNameSpecifier *NNS,
+ SourceRange Range,
+ const TemplateArgumentList &TemplateArgs) {
+ // Instantiate the prefix of this nested name specifier.
+ NestedNameSpecifier *Prefix = NNS->getPrefix();
+ if (Prefix) {
+ Prefix = InstantiateNestedNameSpecifier(Prefix, Range, TemplateArgs);
+ if (!Prefix)
+ return 0;
+ }
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier: {
+ assert(Prefix &&
+ "Can't have an identifier nested-name-specifier with no prefix");
+ CXXScopeSpec SS;
+ // FIXME: The source location information is all wrong.
+ SS.setRange(Range);
+ SS.setScopeRep(Prefix);
+ return static_cast<NestedNameSpecifier *>(
+ ActOnCXXNestedNameSpecifier(0, SS,
+ Range.getEnd(),
+ Range.getEnd(),
+ *NNS->getAsIdentifier()));
+ break;
+ }
+
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::Global:
+ return NNS;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ QualType T = QualType(NNS->getAsType(), 0);
+ if (!T->isDependentType())
+ return NNS;
+
+ T = InstantiateType(T, TemplateArgs, Range.getBegin(), DeclarationName());
+ if (T.isNull())
+ return 0;
+
+ if (T->isRecordType() ||
+ (getLangOptions().CPlusPlus0x && T->isEnumeralType())) {
+ assert(T.getCVRQualifiers() == 0 && "Can't get cv-qualifiers here");
+ return NestedNameSpecifier::Create(Context, Prefix,
+ NNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate,
+ T.getTypePtr());
+ }
+
+ Diag(Range.getBegin(), diag::err_nested_name_spec_non_tag) << T;
+ return 0;
+ }
+ }
+
+ // Required to silence a GCC warning
+ return 0;
+}
+
+TemplateName
+Sema::InstantiateTemplateName(TemplateName Name, SourceLocation Loc,
+ const TemplateArgumentList &TemplateArgs) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Name.getAsTemplateDecl())) {
+ assert(TTP->getDepth() == 0 &&
+ "Cannot reduce depth of a template template parameter");
+ assert(TemplateArgs[TTP->getPosition()].getAsDecl() &&
+ "Wrong kind of template template argument");
+ ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(
+ TemplateArgs[TTP->getPosition()].getAsDecl());
+ assert(ClassTemplate && "Expected a class template");
+ if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
+ NestedNameSpecifier *NNS
+ = InstantiateNestedNameSpecifier(QTN->getQualifier(),
+ /*FIXME=*/SourceRange(Loc),
+ TemplateArgs);
+ if (NNS)
+ return Context.getQualifiedTemplateName(NNS,
+ QTN->hasTemplateKeyword(),
+ ClassTemplate);
+ }
+
+ return TemplateName(ClassTemplate);
+ } else if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
+ NestedNameSpecifier *NNS
+ = InstantiateNestedNameSpecifier(DTN->getQualifier(),
+ /*FIXME=*/SourceRange(Loc),
+ TemplateArgs);
+
+ if (!NNS) // FIXME: Not the best recovery strategy.
+ return Name;
+
+ if (NNS->isDependent())
+ return Context.getDependentTemplateName(NNS, DTN->getName());
+
+ // Somewhat redundant with ActOnDependentTemplateName.
+ CXXScopeSpec SS;
+ SS.setRange(SourceRange(Loc));
+ SS.setScopeRep(NNS);
+ TemplateTy Template;
+ TemplateNameKind TNK = isTemplateName(*DTN->getName(), 0, Template, &SS);
+ if (TNK == TNK_Non_template) {
+ Diag(Loc, diag::err_template_kw_refers_to_non_template)
+ << DTN->getName();
+ return Name;
+ } else if (TNK == TNK_Function_template) {
+ Diag(Loc, diag::err_template_kw_refers_to_non_template)
+ << DTN->getName();
+ return Name;
+ }
+
+ return Template.getAsVal<TemplateName>();
+ }
+
+ // FIXME: Even if we're referring to a Decl that isn't a template template
+ // parameter, we may need to instantiate the outer contexts of that
+ // Decl. However, this won't be needed until we implement member templates.
+ return Name;
+}
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
new file mode 100644
index 0000000..6d7dc2e
--- /dev/null
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -0,0 +1,767 @@
+//===--- SemaTemplateInstantiateDecl.cpp - C++ Template Decl Instantiation ===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements C++ template instantiation for declarations.
+//
+//===----------------------------------------------------------------------===/
+#include "Sema.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/Compiler.h"
+
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN TemplateDeclInstantiator
+ : public DeclVisitor<TemplateDeclInstantiator, Decl *> {
+ Sema &SemaRef;
+ DeclContext *Owner;
+ const TemplateArgumentList &TemplateArgs;
+
+ public:
+ typedef Sema::OwningExprResult OwningExprResult;
+
+ TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
+ const TemplateArgumentList &TemplateArgs)
+ : SemaRef(SemaRef), Owner(Owner), TemplateArgs(TemplateArgs) { }
+
+ // FIXME: Once we get closer to completion, replace these manually-written
+ // declarations with automatically-generated ones from
+ // clang/AST/DeclNodes.def.
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitCXXRecordDecl(CXXRecordDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ ParmVarDecl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitOriginalParmVarDecl(OriginalParmVarDecl *D);
+
+ // Base case. FIXME: Remove once we can instantiate everything.
+ Decl *VisitDecl(Decl *) {
+ assert(false && "Template instantiation of unknown declaration kind!");
+ return 0;
+ }
+
+ // Helper functions for instantiating methods.
+ QualType InstantiateFunctionType(FunctionDecl *D,
+ llvm::SmallVectorImpl<ParmVarDecl *> &Params);
+ bool InitMethodInstantiation(CXXMethodDecl *New, CXXMethodDecl *Tmpl);
+ };
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ assert(false && "Translation units cannot be instantiated");
+ return D;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitNamespaceDecl(NamespaceDecl *D) {
+ assert(false && "Namespaces cannot be instantiated");
+ return D;
+}
+
+Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
+ bool Invalid = false;
+ QualType T = D->getUnderlyingType();
+ if (T->isDependentType()) {
+ T = SemaRef.InstantiateType(T, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (T.isNull()) {
+ Invalid = true;
+ T = SemaRef.Context.IntTy;
+ }
+ }
+
+ // Create the new typedef
+ TypedefDecl *Typedef
+ = TypedefDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier(), T);
+ if (Invalid)
+ Typedef->setInvalidDecl();
+
+ Owner->addDecl(SemaRef.Context, Typedef);
+
+ return Typedef;
+}
+
+Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
+ // Instantiate the type of the declaration
+ QualType T = SemaRef.InstantiateType(D->getType(), TemplateArgs,
+ D->getTypeSpecStartLoc(),
+ D->getDeclName());
+ if (T.isNull())
+ return 0;
+
+ // Build the instantiated declaration
+ VarDecl *Var = VarDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(), D->getIdentifier(),
+ T, D->getStorageClass(),
+ D->getTypeSpecStartLoc());
+ Var->setThreadSpecified(D->isThreadSpecified());
+ Var->setCXXDirectInitializer(D->hasCXXDirectInitializer());
+ Var->setDeclaredInCondition(D->isDeclaredInCondition());
+
+ // FIXME: In theory, we could have a previous declaration for variables that
+ // are not static data members.
+ bool Redeclaration = false;
+ SemaRef.CheckVariableDeclaration(Var, 0, Redeclaration);
+ Owner->addDecl(SemaRef.Context, Var);
+
+ if (D->getInit()) {
+ OwningExprResult Init
+ = SemaRef.InstantiateExpr(D->getInit(), TemplateArgs);
+ if (Init.isInvalid())
+ Var->setInvalidDecl();
+ else
+ SemaRef.AddInitializerToDecl(Sema::DeclPtrTy::make(Var), move(Init),
+ D->hasCXXDirectInitializer());
+ } else {
+ // FIXME: Call ActOnUninitializedDecl? (Not always)
+ }
+
+ return Var;
+}
+
+Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
+ bool Invalid = false;
+ QualType T = D->getType();
+ if (T->isDependentType()) {
+ T = SemaRef.InstantiateType(T, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!T.isNull() && T->isFunctionType()) {
+ // C++ [temp.arg.type]p3:
+ // If a declaration acquires a function type through a type
+ // dependent on a template-parameter and this causes a
+ // declaration that does not use the syntactic form of a
+ // function declarator to have function type, the program is
+ // ill-formed.
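+ //
+ // For example:
+ //   template<typename T> struct A { T Member; };
+ //   A<int(float)> a;  // error: 'Member' would have function type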
+ SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function)
+ << T;
+ T = QualType();
+ Invalid = true;
+ }
+ }
+
+ Expr *BitWidth = D->getBitWidth();
+ if (Invalid)
+ BitWidth = 0;
+ else if (BitWidth) {
+ OwningExprResult InstantiatedBitWidth
+ = SemaRef.InstantiateExpr(BitWidth, TemplateArgs);
+ if (InstantiatedBitWidth.isInvalid()) {
+ Invalid = true;
+ BitWidth = 0;
+ } else
+ BitWidth = InstantiatedBitWidth.takeAs<Expr>();
+ }
+
+ FieldDecl *Field = SemaRef.CheckFieldDecl(D->getDeclName(), T,
+ cast<RecordDecl>(Owner),
+ D->getLocation(),
+ D->isMutable(),
+ BitWidth,
+ D->getAccess(),
+ 0);
+ if (Field) {
+ if (Invalid)
+ Field->setInvalidDecl();
+
+ Owner->addDecl(SemaRef.Context, Field);
+ }
+
+ return Field;
+}
+
+Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Expr *AssertExpr = D->getAssertExpr();
+
+ OwningExprResult InstantiatedAssertExpr
+ = SemaRef.InstantiateExpr(AssertExpr, TemplateArgs);
+ if (InstantiatedAssertExpr.isInvalid())
+ return 0;
+
+ OwningExprResult Message = SemaRef.Clone(D->getMessage());
+ Decl *StaticAssert
+ = SemaRef.ActOnStaticAssertDeclaration(D->getLocation(),
+ move(InstantiatedAssertExpr),
+ move(Message)).getAs<Decl>();
+ return StaticAssert;
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
+ EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(), D->getIdentifier(),
+ /*PrevDecl=*/0);
+ Enum->setInstantiationOfMemberEnum(D);
+ Enum->setAccess(D->getAccess());
+ Owner->addDecl(SemaRef.Context, Enum);
+ Enum->startDefinition();
+
+ llvm::SmallVector<Sema::DeclPtrTy, 4> Enumerators;
+
+ EnumConstantDecl *LastEnumConst = 0;
+ for (EnumDecl::enumerator_iterator EC = D->enumerator_begin(SemaRef.Context),
+ ECEnd = D->enumerator_end(SemaRef.Context);
+ EC != ECEnd; ++EC) {
+ // The specified value for the enumerator.
+ OwningExprResult Value = SemaRef.Owned((Expr *)0);
+ if (Expr *UninstValue = EC->getInitExpr())
+ Value = SemaRef.InstantiateExpr(UninstValue, TemplateArgs);
+
+ // Drop the initial value and continue.
+ bool isInvalid = false;
+ if (Value.isInvalid()) {
+ Value = SemaRef.Owned((Expr *)0);
+ isInvalid = true;
+ }
+
+ EnumConstantDecl *EnumConst
+ = SemaRef.CheckEnumConstant(Enum, LastEnumConst,
+ EC->getLocation(), EC->getIdentifier(),
+ move(Value));
+
+ if (isInvalid) {
+ if (EnumConst)
+ EnumConst->setInvalidDecl();
+ Enum->setInvalidDecl();
+ }
+
+ if (EnumConst) {
+ Enum->addDecl(SemaRef.Context, EnumConst);
+ Enumerators.push_back(Sema::DeclPtrTy::make(EnumConst));
+ LastEnumConst = EnumConst;
+ }
+ }
+
+ // FIXME: Fixup LBraceLoc and RBraceLoc
+ SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(), SourceLocation(),
+ Sema::DeclPtrTy::make(Enum),
+ &Enumerators[0], Enumerators.size());
+
+ return Enum;
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ assert(false && "EnumConstantDecls can only occur within EnumDecls.");
+ return 0;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ CXXRecordDecl *PrevDecl = 0;
+ if (D->isInjectedClassName())
+ PrevDecl = cast<CXXRecordDecl>(Owner);
+
+ CXXRecordDecl *Record
+ = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
+ D->getLocation(), D->getIdentifier(), PrevDecl);
+ Record->setImplicit(D->isImplicit());
+ Record->setAccess(D->getAccess());
+ if (!D->isInjectedClassName())
+ Record->setInstantiationOfMemberClass(D);
+
+ Owner->addDecl(SemaRef.Context, Record);
+ return Record;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ // Only handle actual methods; we'll deal with constructors,
+ // destructors, etc. separately.
+ if (D->getKind() != Decl::CXXMethod)
+ return 0;
+
+ Sema::LocalInstantiationScope Scope(SemaRef);
+
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ QualType T = InstantiateFunctionType(D, Params);
+ if (T.isNull())
+ return 0;
+
+ // Build the instantiated method declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
+ CXXMethodDecl *Method
+ = CXXMethodDecl::Create(SemaRef.Context, Record, D->getLocation(),
+ D->getDeclName(), T, D->isStatic(),
+ D->isInline());
+ Method->setInstantiationOfMemberFunction(D);
+
+ // Attach the parameters
+ for (unsigned P = 0; P < Params.size(); ++P)
+ Params[P]->setOwningFunction(Method);
+ Method->setParams(SemaRef.Context, Params.data(), Params.size());
+
+ if (InitMethodInstantiation(Method, D))
+ Method->setInvalidDecl();
+
+ NamedDecl *PrevDecl
+ = SemaRef.LookupQualifiedName(Owner, Method->getDeclName(),
+ Sema::LookupOrdinaryName, true);
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+  // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (PrevDecl && PrevDecl->getIdentifierNamespace() == Decl::IDNS_Tag)
+ PrevDecl = 0;
+ bool Redeclaration = false;
+ bool OverloadableAttrRequired = false;
+ SemaRef.CheckFunctionDeclaration(Method, PrevDecl, Redeclaration,
+ /*FIXME:*/OverloadableAttrRequired);
+
+ if (!Method->isInvalidDecl() || !PrevDecl)
+ Owner->addDecl(SemaRef.Context, Method);
+ return Method;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ Sema::LocalInstantiationScope Scope(SemaRef);
+
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ QualType T = InstantiateFunctionType(D, Params);
+ if (T.isNull())
+ return 0;
+
+  // Build the instantiated constructor declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
+ QualType ClassTy = SemaRef.Context.getTypeDeclType(Record);
+ DeclarationName Name
+ = SemaRef.Context.DeclarationNames.getCXXConstructorName(
+ SemaRef.Context.getCanonicalType(ClassTy));
+ CXXConstructorDecl *Constructor
+ = CXXConstructorDecl::Create(SemaRef.Context, Record, D->getLocation(),
+ Name, T, D->isExplicit(), D->isInline(),
+ false);
+ Constructor->setInstantiationOfMemberFunction(D);
+
+ // Attach the parameters
+ for (unsigned P = 0; P < Params.size(); ++P)
+ Params[P]->setOwningFunction(Constructor);
+ Constructor->setParams(SemaRef.Context, Params.data(), Params.size());
+
+ if (InitMethodInstantiation(Constructor, D))
+ Constructor->setInvalidDecl();
+
+ NamedDecl *PrevDecl
+ = SemaRef.LookupQualifiedName(Owner, Name, Sema::LookupOrdinaryName, true);
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+  // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (PrevDecl && PrevDecl->getIdentifierNamespace() == Decl::IDNS_Tag)
+ PrevDecl = 0;
+ bool Redeclaration = false;
+ bool OverloadableAttrRequired = false;
+ SemaRef.CheckFunctionDeclaration(Constructor, PrevDecl, Redeclaration,
+ /*FIXME:*/OverloadableAttrRequired);
+
+ Record->addedConstructor(SemaRef.Context, Constructor);
+ Owner->addDecl(SemaRef.Context, Constructor);
+ return Constructor;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ Sema::LocalInstantiationScope Scope(SemaRef);
+
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ QualType T = InstantiateFunctionType(D, Params);
+ if (T.isNull())
+ return 0;
+ assert(Params.size() == 0 && "Destructor with parameters?");
+
+ // Build the instantiated destructor declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
+ QualType ClassTy =
+ SemaRef.Context.getCanonicalType(SemaRef.Context.getTypeDeclType(Record));
+ CXXDestructorDecl *Destructor
+ = CXXDestructorDecl::Create(SemaRef.Context, Record,
+ D->getLocation(),
+ SemaRef.Context.DeclarationNames.getCXXDestructorName(ClassTy),
+ T, D->isInline(), false);
+ Destructor->setInstantiationOfMemberFunction(D);
+ if (InitMethodInstantiation(Destructor, D))
+ Destructor->setInvalidDecl();
+
+ bool Redeclaration = false;
+ bool OverloadableAttrRequired = false;
+ NamedDecl *PrevDecl = 0;
+ SemaRef.CheckFunctionDeclaration(Destructor, PrevDecl, Redeclaration,
+ /*FIXME:*/OverloadableAttrRequired);
+ Owner->addDecl(SemaRef.Context, Destructor);
+ return Destructor;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ Sema::LocalInstantiationScope Scope(SemaRef);
+
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ QualType T = InstantiateFunctionType(D, Params);
+ if (T.isNull())
+ return 0;
+ assert(Params.size() == 0 && "Destructor with parameters?");
+
+ // Build the instantiated conversion declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
+ QualType ClassTy = SemaRef.Context.getTypeDeclType(Record);
+ QualType ConvTy
+ = SemaRef.Context.getCanonicalType(T->getAsFunctionType()->getResultType());
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(SemaRef.Context, Record,
+ D->getLocation(),
+ SemaRef.Context.DeclarationNames.getCXXConversionFunctionName(ConvTy),
+ T, D->isInline(), D->isExplicit());
+ Conversion->setInstantiationOfMemberFunction(D);
+ if (InitMethodInstantiation(Conversion, D))
+ Conversion->setInvalidDecl();
+
+ bool Redeclaration = false;
+ bool OverloadableAttrRequired = false;
+ NamedDecl *PrevDecl = 0;
+ SemaRef.CheckFunctionDeclaration(Conversion, PrevDecl, Redeclaration,
+ /*FIXME:*/OverloadableAttrRequired);
+ Owner->addDecl(SemaRef.Context, Conversion);
+ return Conversion;
+}
+
+ParmVarDecl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
+ QualType OrigT = SemaRef.InstantiateType(D->getOriginalType(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (OrigT.isNull())
+ return 0;
+
+ QualType T = SemaRef.adjustParameterType(OrigT);
+
+ if (D->getDefaultArg()) {
+ // FIXME: Leave a marker for "uninstantiated" default
+ // arguments. They only get instantiated on demand at the call
+ // site.
+ unsigned DiagID = SemaRef.Diags.getCustomDiagID(Diagnostic::Warning,
+ "sorry, dropping default argument during template instantiation");
+ SemaRef.Diag(D->getDefaultArg()->getSourceRange().getBegin(), DiagID)
+ << D->getDefaultArg()->getSourceRange();
+ }
+
+ // Allocate the parameter
+ ParmVarDecl *Param = 0;
+ if (T == OrigT)
+ Param = ParmVarDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier(), T, D->getStorageClass(),
+ 0);
+ else
+ Param = OriginalParmVarDecl::Create(SemaRef.Context, Owner,
+ D->getLocation(), D->getIdentifier(),
+ T, OrigT, D->getStorageClass(), 0);
+
+ // Note: we don't try to instantiate function parameters until after
+ // we've instantiated the function's type. Therefore, we don't have
+ // to check for 'void' parameter types here.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
+ return Param;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitOriginalParmVarDecl(OriginalParmVarDecl *D) {
+ // Since parameter types can decay either before or after
+  // instantiation, we simply treat OriginalParmVarDecls the same way
+  // as ParmVarDecls, and create one or the other depending
+ // on what happens after template instantiation.
+ return VisitParmVarDecl(D);
+}
+
+Decl *Sema::InstantiateDecl(Decl *D, DeclContext *Owner,
+ const TemplateArgumentList &TemplateArgs) {
+ TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
+ return Instantiator.Visit(D);
+}
+
+/// \brief Instantiates the type of the given function, including
+/// instantiating all of the function parameters.
+///
+/// \param D The function whose type and parameters will be instantiated
+///
+/// \param Params the instantiated parameter declarations
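+///
+/// For illustration (a hypothetical member, not taken from the code
+/// below), given:
+///
+/// \code
+/// template<typename T>
+/// struct X {
+///   T f(T *p, int n);
+/// };
+/// \endcode
+///
+/// instantiating the type of X<float>::f with T = float produces the
+/// function type 'float (float *, int)', and Params receives the two
+/// instantiated ParmVarDecls for 'p' and 'n'.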
+///
+/// \returns the instantiated function's type if successful, or a NULL
+/// type if there was an error.
+QualType
+TemplateDeclInstantiator::InstantiateFunctionType(FunctionDecl *D,
+ llvm::SmallVectorImpl<ParmVarDecl *> &Params) {
+ bool InvalidDecl = false;
+
+ // Instantiate the function parameters
+ TemplateDeclInstantiator ParamInstantiator(SemaRef, 0, TemplateArgs);
+ llvm::SmallVector<QualType, 4> ParamTys;
+ for (FunctionDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end();
+ P != PEnd; ++P) {
+ if (ParmVarDecl *PInst = ParamInstantiator.VisitParmVarDecl(*P)) {
+ if (PInst->getType()->isVoidType()) {
+ SemaRef.Diag(PInst->getLocation(), diag::err_param_with_void_type);
+ PInst->setInvalidDecl();
+ }
+ else if (SemaRef.RequireNonAbstractType(PInst->getLocation(),
+ PInst->getType(),
+ diag::err_abstract_type_in_decl,
+ Sema::AbstractParamType))
+ PInst->setInvalidDecl();
+
+ Params.push_back(PInst);
+ ParamTys.push_back(PInst->getType());
+
+ if (PInst->isInvalidDecl())
+ InvalidDecl = true;
+ } else
+ InvalidDecl = true;
+ }
+
+ // FIXME: Deallocate dead declarations.
+ if (InvalidDecl)
+ return QualType();
+
+ const FunctionProtoType *Proto = D->getType()->getAsFunctionProtoType();
+ assert(Proto && "Missing prototype?");
+ QualType ResultType
+ = SemaRef.InstantiateType(Proto->getResultType(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (ResultType.isNull())
+ return QualType();
+
+ return SemaRef.BuildFunctionType(ResultType, ParamTys.data(), ParamTys.size(),
+ Proto->isVariadic(), Proto->getTypeQuals(),
+ D->getLocation(), D->getDeclName());
+}
+
+/// \brief Initializes common fields of an instantiated method
+/// declaration (New) from the corresponding fields of its template
+/// (Tmpl).
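+///
+/// For example (an illustrative sketch; the class and member names are
+/// made up):
+///
+/// \code
+/// template<typename T>
+/// struct Base {
+///   virtual void f(T) = 0;
+/// };
+/// \endcode
+///
+/// When Base<int>::f is instantiated, it is marked virtual and pure like
+/// its template, and Base<int> itself becomes polymorphic, abstract, and
+/// no longer an aggregate or a POD.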
+///
+/// \returns true if there was an error
+bool
+TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
+ CXXMethodDecl *Tmpl) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Owner);
+ New->setAccess(Tmpl->getAccess());
+ if (Tmpl->isVirtualAsWritten()) {
+ New->setVirtualAsWritten(true);
+ Record->setAggregate(false);
+ Record->setPOD(false);
+ Record->setPolymorphic(true);
+ }
+ if (Tmpl->isDeleted())
+ New->setDeleted();
+ if (Tmpl->isPure()) {
+ New->setPure();
+ Record->setAbstract(true);
+ }
+
+ // FIXME: attributes
+ // FIXME: New needs a pointer to Tmpl
+ return false;
+}
+
+/// \brief Instantiate the definition of the given function from its
+/// template.
+///
+/// \param Function the already-instantiated declaration of a
+/// function.
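+///
+/// For instance (illustrative only):
+///
+/// \code
+/// template<typename T>
+/// struct X {
+///   T get() const { return value; }
+///   T value;
+/// };
+///
+/// template struct X<int>; // forces instantiation of X<int>::get's body
+/// \endcode
+///
+/// Here X<int>::get is the already-instantiated declaration passed in as
+/// \p Function, and the body of X<T>::get is the pattern that gets
+/// substituted.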
+void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
+ FunctionDecl *Function) {
+ // FIXME: make this work for function template specializations, too.
+
+ if (Function->isInvalidDecl())
+ return;
+
+ // Find the function body that we'll be substituting.
+ const FunctionDecl *PatternDecl
+ = Function->getInstantiatedFromMemberFunction();
+ Stmt *Pattern = 0;
+ if (PatternDecl)
+ Pattern = PatternDecl->getBody(Context, PatternDecl);
+
+ if (!Pattern)
+ return;
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
+ if (Inst)
+ return;
+
+ ActOnStartOfFunctionDef(0, DeclPtrTy::make(Function));
+
+ // Introduce a new scope where local variable instantiations will be
+ // recorded.
+ LocalInstantiationScope Scope(*this);
+
+ // Introduce the instantiated function parameters into the local
+ // instantiation scope.
+ for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I)
+ Scope.InstantiatedLocal(PatternDecl->getParamDecl(I),
+ Function->getParamDecl(I));
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ DeclContext *PreviousContext = CurContext;
+ CurContext = Function;
+
+ // Instantiate the function body.
+ OwningStmtResult Body
+ = InstantiateStmt(Pattern, getTemplateInstantiationArgs(Function));
+
+ ActOnFinishFunctionBody(DeclPtrTy::make(Function), move(Body),
+ /*IsInstantiation=*/true);
+
+ CurContext = PreviousContext;
+
+ DeclGroupRef DG(Function);
+ Consumer.HandleTopLevelDecl(DG);
+}
+
+/// \brief Instantiate the definition of the given variable from its
+/// template.
+///
+/// \param Var the already-instantiated declaration of a variable.
+void Sema::InstantiateVariableDefinition(VarDecl *Var) {
+ // FIXME: Implement this!
+}
+
+static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
+ if (D->getKind() != Other->getKind())
+ return false;
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Other))
+ return Ctx.getCanonicalDecl(Record->getInstantiatedFromMemberClass())
+ == Ctx.getCanonicalDecl(D);
+
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Other))
+ return Ctx.getCanonicalDecl(Function->getInstantiatedFromMemberFunction())
+ == Ctx.getCanonicalDecl(D);
+
+ if (EnumDecl *Enum = dyn_cast<EnumDecl>(Other))
+ return Ctx.getCanonicalDecl(Enum->getInstantiatedFromMemberEnum())
+ == Ctx.getCanonicalDecl(D);
+
+ // FIXME: How can we find instantiations of anonymous unions?
+
+ return D->getDeclName() && isa<NamedDecl>(Other) &&
+ D->getDeclName() == cast<NamedDecl>(Other)->getDeclName();
+}
+
+template<typename ForwardIterator>
+static NamedDecl *findInstantiationOf(ASTContext &Ctx,
+ NamedDecl *D,
+ ForwardIterator first,
+ ForwardIterator last) {
+ for (; first != last; ++first)
+ if (isInstantiationOf(Ctx, D, *first))
+ return cast<NamedDecl>(*first);
+
+ return 0;
+}
+
+/// \brief Find the instantiation of the given declaration within the
+/// current instantiation.
+///
+/// This routine is intended to be used when \p D is a declaration
+/// referenced from within a template that needs to be mapped into the
+/// corresponding declaration within an instantiation. For example,
+/// given:
+///
+/// \code
+/// template<typename T>
+/// struct X {
+/// enum Kind {
+/// KnownValue = sizeof(T)
+/// };
+///
+/// bool getKind() const { return KnownValue; }
+/// };
+///
+/// template struct X<int>;
+/// \endcode
+///
+/// In the instantiation of X<int>::getKind(), we need to map the
+/// EnumConstantDecl for KnownValue (which refers to
+/// X<T>::<Kind>::KnownValue) to its instantiation
+/// (X<int>::<Kind>::KnownValue). InstantiateCurrentDeclRef() performs
+/// this mapping from within the instantiation of X<int>.
+NamedDecl * Sema::InstantiateCurrentDeclRef(NamedDecl *D) {
+ DeclContext *ParentDC = D->getDeclContext();
+ if (isa<ParmVarDecl>(D) || ParentDC->isFunctionOrMethod()) {
+ // D is a local of some kind. Look into the map of local
+ // declarations to their instantiations.
+ return cast<NamedDecl>(CurrentInstantiationScope->getInstantiationOf(D));
+ }
+
+ if (NamedDecl *ParentDecl = dyn_cast<NamedDecl>(ParentDC)) {
+ ParentDecl = InstantiateCurrentDeclRef(ParentDecl);
+ if (!ParentDecl)
+ return 0;
+
+ ParentDC = cast<DeclContext>(ParentDecl);
+ }
+
+ if (ParentDC != D->getDeclContext()) {
+ // We performed some kind of instantiation in the parent context,
+ // so now we need to look into the instantiated parent context to
+ // find the instantiation of the declaration D.
+ NamedDecl *Result = 0;
+ if (D->getDeclName()) {
+ DeclContext::lookup_result Found
+ = ParentDC->lookup(Context, D->getDeclName());
+ Result = findInstantiationOf(Context, D, Found.first, Found.second);
+ } else {
+ // Since we don't have a name for the entity we're looking for,
+ // our only option is to walk through all of the declarations to
+ // find that name. This will occur in a few cases:
+ //
+ // - anonymous struct/union within a template
+ // - unnamed class/struct/union/enum within a template
+ //
+ // FIXME: Find a better way to find these instantiations!
+ Result = findInstantiationOf(Context, D,
+ ParentDC->decls_begin(Context),
+ ParentDC->decls_end(Context));
+ }
+ assert(Result && "Unable to find instantiation of declaration!");
+ D = Result;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
+ if (ClassTemplateDecl *ClassTemplate
+ = Record->getDescribedClassTemplate()) {
+ // When the declaration D was parsed, it referred to the current
+ // instantiation. Therefore, look through the current context,
+ // which contains actual instantiations, to find the
+ // instantiation of the "current instantiation" that D refers
+ // to. Alternatively, we could just instantiate the
+ // injected-class-name with the current template arguments, but
+ // such an instantiation is far more expensive.
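+      //
+      // For example (illustrative only), inside
+      //
+      //   template<typename T> struct X { X *next; };
+      //
+      // the name 'X' in 'X *next' refers to the current instantiation,
+      // so within X<int> it must map to X<int> itself.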
+ for (DeclContext *DC = CurContext; !DC->isFileContext();
+ DC = DC->getParent()) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(DC))
+ if (Context.getCanonicalDecl(Spec->getSpecializedTemplate())
+ == Context.getCanonicalDecl(ClassTemplate))
+ return Spec;
+ }
+
+ assert(false &&
+ "Unable to find declaration for the current instantiation");
+ }
+
+ return D;
+}
diff --git a/lib/Sema/SemaTemplateInstantiateExpr.cpp b/lib/Sema/SemaTemplateInstantiateExpr.cpp
new file mode 100644
index 0000000..a6b9703
--- /dev/null
+++ b/lib/Sema/SemaTemplateInstantiateExpr.cpp
@@ -0,0 +1,1278 @@
+//===--- SemaTemplateInstantiateExpr.cpp - C++ Template Expr Instantiation ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template instantiation for expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Parse/DeclSpec.h"
+#include "clang/Parse/Designator.h"
+#include "clang/Lex/Preprocessor.h" // for the identifier table
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN TemplateExprInstantiator
+ : public StmtVisitor<TemplateExprInstantiator, Sema::OwningExprResult> {
+ Sema &SemaRef;
+ const TemplateArgumentList &TemplateArgs;
+
+ public:
+ typedef Sema::OwningExprResult OwningExprResult;
+
+ TemplateExprInstantiator(Sema &SemaRef,
+ const TemplateArgumentList &TemplateArgs)
+ : SemaRef(SemaRef), TemplateArgs(TemplateArgs) { }
+
+ // Declare VisitXXXStmt nodes for all of the expression kinds.
+#define EXPR(Type, Base) OwningExprResult Visit##Type(Type *S);
+#define STMT(Type, Base)
+#include "clang/AST/StmtNodes.def"
+
+ // Base case. We can't get here.
+ Sema::OwningExprResult VisitStmt(Stmt *S) {
+ S->dump();
+ assert(false && "Cannot instantiate this kind of expression");
+ return SemaRef.ExprError();
+ }
+ };
+}
+
+// Base case. We can't get here.
+Sema::OwningExprResult TemplateExprInstantiator::VisitExpr(Expr *E) {
+ E->dump();
+ assert(false && "Cannot instantiate this kind of expression");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitPredefinedExpr(PredefinedExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitIntegerLiteral(IntegerLiteral *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitFloatingLiteral(FloatingLiteral *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitStringLiteral(StringLiteral *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCharacterLiteral(CharacterLiteral *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitGNUNullExpr(GNUNullExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitUnresolvedFunctionNameExpr(
+ UnresolvedFunctionNameExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitDeclRefExpr(DeclRefExpr *E) {
+ NamedDecl *D = E->getDecl();
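+  // A reference to a non-type template parameter is rebuilt as a literal
+  // taken from the corresponding template argument. For example
+  // (hypothetical): in 'template<int N> int f() { return N; }',
+  // instantiating f<3> turns the reference to N into the integer
+  // literal 3.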
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ assert(NTTP->getDepth() == 0 && "No nested templates yet");
+ const TemplateArgument &Arg = TemplateArgs[NTTP->getPosition()];
+ QualType T = Arg.getIntegralType();
+ if (T->isCharType() || T->isWideCharType())
+ return SemaRef.Owned(new (SemaRef.Context) CharacterLiteral(
+ Arg.getAsIntegral()->getZExtValue(),
+ T->isWideCharType(),
+ T,
+ E->getSourceRange().getBegin()));
+ else if (T->isBooleanType())
+ return SemaRef.Owned(new (SemaRef.Context) CXXBoolLiteralExpr(
+ Arg.getAsIntegral()->getBoolValue(),
+ T,
+ E->getSourceRange().getBegin()));
+
+ return SemaRef.Owned(new (SemaRef.Context) IntegerLiteral(
+ *Arg.getAsIntegral(),
+ T,
+ E->getSourceRange().getBegin()));
+ }
+
+ if (OverloadedFunctionDecl *Ovl = dyn_cast<OverloadedFunctionDecl>(D)) {
+ // FIXME: instantiate each decl in the overload set
+ return SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Ovl,
+ SemaRef.Context.OverloadTy,
+ E->getLocation(),
+ false, false));
+ }
+
+ ValueDecl *NewD
+ = dyn_cast_or_null<ValueDecl>(SemaRef.InstantiateCurrentDeclRef(D));
+ if (!NewD)
+ return SemaRef.ExprError();
+
+ // FIXME: Build QualifiedDeclRefExpr?
+ QualType T = NewD->getType();
+ return SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(NewD,
+ T.getNonReferenceType(),
+ E->getLocation(),
+ T->isDependentType(),
+ T->isDependentType()));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitParenExpr(ParenExpr *E) {
+ Sema::OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.Owned(new (SemaRef.Context) ParenExpr(
+ E->getLParen(), E->getRParen(),
+ (Expr *)SubExpr.release()));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitUnaryOperator(UnaryOperator *E) {
+ Sema::OwningExprResult Arg = Visit(E->getSubExpr());
+ if (Arg.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.CreateBuiltinUnaryOp(E->getOperatorLoc(),
+ E->getOpcode(),
+ move(Arg));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ Sema::OwningExprResult LHS = Visit(E->getLHS());
+ if (LHS.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult RHS = Visit(E->getRHS());
+ if (RHS.isInvalid())
+ return SemaRef.ExprError();
+
+ // Since the overloaded array-subscript operator (operator[]) can
+ // only be a member function, we can make several simplifying
+ // assumptions here:
+  //   1) Normal name lookup (from the current scope) will never
+  //   find any declarations of operator[] that won't also be found by
+ // member operator lookup, so it is safe to pass a NULL Scope
+ // during the instantiation to avoid the lookup entirely.
+ //
+ // 2) Neither normal name lookup nor argument-dependent lookup at
+ // template definition time will find any operators that won't be
+ // found at template instantiation time, so we do not need to
+ // cache the results of name lookup as we do for the binary
+ // operators.
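+  //
+  // As a minimal illustration of the member-only case (the type name is
+  // made up):
+  //
+  //   struct Vec { int &operator[](unsigned); };
+  //
+  // any operator[] that applies to an expression of type Vec must be a
+  // member, so member operator lookup alone suffices here.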
+ SourceLocation LLocFake = ((Expr*)LHS.get())->getSourceRange().getBegin();
+ return SemaRef.ActOnArraySubscriptExpr(/*Scope=*/0, move(LHS),
+ /*FIXME:*/LLocFake,
+ move(RHS),
+ E->getRBracketLoc());
+}
+
+Sema::OwningExprResult TemplateExprInstantiator::VisitCallExpr(CallExpr *E) {
+ // Instantiate callee
+ OwningExprResult Callee = Visit(E->getCallee());
+ if (Callee.isInvalid())
+ return SemaRef.ExprError();
+
+ // Instantiate arguments
+ ASTOwningVector<&ActionBase::DeleteExpr> Args(SemaRef);
+ llvm::SmallVector<SourceLocation, 4> FakeCommaLocs;
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ OwningExprResult Arg = Visit(E->getArg(I));
+ if (Arg.isInvalid())
+ return SemaRef.ExprError();
+
+ FakeCommaLocs.push_back(
+ SemaRef.PP.getLocForEndOfToken(E->getArg(I)->getSourceRange().getEnd()));
+ Args.push_back(Arg.takeAs<Expr>());
+ }
+
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return SemaRef.ActOnCallExpr(/*Scope=*/0, move(Callee),
+ /*FIXME:*/FakeLParenLoc,
+ move_arg(Args),
+ /*FIXME:*/&FakeCommaLocs.front(),
+ E->getRParenLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitMemberExpr(MemberExpr *E) {
+ // Instantiate the base of the expression.
+ OwningExprResult Base = Visit(E->getBase());
+ if (Base.isInvalid())
+ return SemaRef.ExprError();
+
+ // FIXME: Handle declaration names here
+ SourceLocation FakeOperatorLoc =
+ SemaRef.PP.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd());
+ return SemaRef.ActOnMemberReferenceExpr(/*Scope=*/0,
+ move(Base),
+ /*FIXME*/FakeOperatorLoc,
+ E->isArrow()? tok::arrow
+ : tok::period,
+ E->getMemberLoc(),
+ /*FIXME:*/*E->getMemberDecl()->getIdentifier(),
+ /*FIXME?*/Sema::DeclPtrTy::make((Decl*)0));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ SourceLocation FakeTypeLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getLParenLoc());
+ QualType T = SemaRef.InstantiateType(E->getType(), TemplateArgs,
+ FakeTypeLoc,
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ OwningExprResult Init = Visit(E->getInitializer());
+ if (Init.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnCompoundLiteral(E->getLParenLoc(),
+ T.getAsOpaquePtr(),
+ /*FIXME*/E->getLParenLoc(),
+ move(Init));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitBinaryOperator(BinaryOperator *E) {
+ Sema::OwningExprResult LHS = Visit(E->getLHS());
+ if (LHS.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult RHS = Visit(E->getRHS());
+ if (RHS.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult Result
+ = SemaRef.CreateBuiltinBinOp(E->getOperatorLoc(),
+ E->getOpcode(),
+ (Expr *)LHS.get(),
+ (Expr *)RHS.get());
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ LHS.release();
+ RHS.release();
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCompoundAssignOperator(
+ CompoundAssignOperator *E) {
+ return VisitBinaryOperator(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ Sema::OwningExprResult First = Visit(E->getArg(0));
+ if (First.isInvalid())
+ return SemaRef.ExprError();
+
+ Expr *Args[2] = { (Expr *)First.get(), 0 };
+
+ Sema::OwningExprResult Second(SemaRef);
+ if (E->getNumArgs() == 2) {
+ Second = Visit(E->getArg(1));
+
+ if (Second.isInvalid())
+ return SemaRef.ExprError();
+
+ Args[1] = (Expr *)Second.get();
+ }
+
+ if (!E->isTypeDependent()) {
+ // Since our original expression was not type-dependent, we do not
+ // perform lookup again at instantiation time (C++ [temp.dep]p1).
+ // Instead, we just build the new overloaded operator call
+ // expression.
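+    //
+    // For example (illustrative): in 'a + b' where both operands already
+    // have non-dependent class types, the operator+ selected at template
+    // definition time is simply reused here.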
+ OwningExprResult Callee = Visit(E->getCallee());
+ if (Callee.isInvalid())
+ return SemaRef.ExprError();
+
+ First.release();
+ Second.release();
+
+ return SemaRef.Owned(new (SemaRef.Context) CXXOperatorCallExpr(
+ SemaRef.Context,
+ E->getOperator(),
+ Callee.takeAs<Expr>(),
+ Args, E->getNumArgs(),
+ E->getType(),
+ E->getOperatorLoc()));
+ }
+
+ bool isPostIncDec = E->getNumArgs() == 2 &&
+ (E->getOperator() == OO_PlusPlus || E->getOperator() == OO_MinusMinus);
+ if (E->getNumArgs() == 1 || isPostIncDec) {
+ if (!Args[0]->getType()->isOverloadableType()) {
+ // The argument is not of overloadable type, so try to create a
+ // built-in unary operation.
+ UnaryOperator::Opcode Opc
+ = UnaryOperator::getOverloadedOpcode(E->getOperator(), isPostIncDec);
+
+ return SemaRef.CreateBuiltinUnaryOp(E->getOperatorLoc(), Opc,
+ move(First));
+ }
+
+ // Fall through to perform overload resolution
+ } else {
+ assert(E->getNumArgs() == 2 && "Expected binary operation");
+
+ Sema::OwningExprResult Result(SemaRef);
+ if (!Args[0]->getType()->isOverloadableType() &&
+ !Args[1]->getType()->isOverloadableType()) {
+ // Neither of the arguments is an overloadable type, so try to
+ // create a built-in binary operation.
+ BinaryOperator::Opcode Opc =
+ BinaryOperator::getOverloadedOpcode(E->getOperator());
+ Result = SemaRef.CreateBuiltinBinOp(E->getOperatorLoc(), Opc,
+ Args[0], Args[1]);
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ First.release();
+ Second.release();
+ return move(Result);
+ }
+
+ // Fall through to perform overload resolution.
+ }
+
+ // Compute the set of functions that were found at template
+ // definition time.
+ Sema::FunctionSet Functions;
+ DeclRefExpr *DRE = cast<DeclRefExpr>(E->getCallee());
+ OverloadedFunctionDecl *Overloads
+ = cast<OverloadedFunctionDecl>(DRE->getDecl());
+
+ // FIXME: Do we have to check
+ // IsAcceptableNonMemberOperatorCandidate for each of these?
+ for (OverloadedFunctionDecl::function_iterator
+ F = Overloads->function_begin(),
+ FEnd = Overloads->function_end();
+ F != FEnd; ++F)
+ Functions.insert(*F);
+
+ // Add any functions found via argument-dependent lookup.
+ DeclarationName OpName
+ = SemaRef.Context.DeclarationNames.getCXXOperatorName(E->getOperator());
+ SemaRef.ArgumentDependentLookup(OpName, Args, E->getNumArgs(), Functions);
+
+ // Create the overloaded operator invocation.
+ if (E->getNumArgs() == 1 || isPostIncDec) {
+ UnaryOperator::Opcode Opc
+ = UnaryOperator::getOverloadedOpcode(E->getOperator(), isPostIncDec);
+ return SemaRef.CreateOverloadedUnaryOp(E->getOperatorLoc(), Opc,
+ Functions, move(First));
+ }
+
+ // FIXME: This would be far less ugly if CreateOverloadedBinOp took in ExprArg
+ // arguments!
+ BinaryOperator::Opcode Opc =
+ BinaryOperator::getOverloadedOpcode(E->getOperator());
+ OwningExprResult Result
+ = SemaRef.CreateOverloadedBinOp(E->getOperatorLoc(), Opc,
+ Functions, Args[0], Args[1]);
+
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ First.release();
+ Second.release();
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXConditionDeclExpr(CXXConditionDeclExpr *E) {
+ VarDecl *Var
+ = cast_or_null<VarDecl>(SemaRef.InstantiateDecl(E->getVarDecl(),
+ SemaRef.CurContext,
+ TemplateArgs));
+ if (!Var)
+ return SemaRef.ExprError();
+
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(E->getVarDecl(), Var);
+ return SemaRef.Owned(new (SemaRef.Context) CXXConditionDeclExpr(
+ E->getStartLoc(),
+ SourceLocation(),
+ Var));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitConditionalOperator(ConditionalOperator *E) {
+ Sema::OwningExprResult Cond = Visit(E->getCond());
+ if (Cond.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult LHS = SemaRef.InstantiateExpr(E->getLHS(),
+ TemplateArgs);
+ if (LHS.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult RHS = Visit(E->getRHS());
+ if (RHS.isInvalid())
+ return SemaRef.ExprError();
+
+ if (!E->isTypeDependent()) {
+ // Since our original expression was not type-dependent, we do not
+ // perform lookup again at instantiation time (C++ [temp.dep]p1).
+    // Instead, we just build the new conditional operator expression.
+ return SemaRef.Owned(new (SemaRef.Context) ConditionalOperator(
+ Cond.takeAs<Expr>(),
+ LHS.takeAs<Expr>(),
+ RHS.takeAs<Expr>(),
+ E->getType()));
+ }
+
+ return SemaRef.ActOnConditionalOp(/*FIXME*/E->getCond()->getLocEnd(),
+ /*FIXME*/E->getFalseExpr()->getLocStart(),
+ move(Cond), move(LHS), move(RHS));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ return SemaRef.ActOnAddrLabel(E->getAmpAmpLoc(),
+ E->getLabelLoc(),
+ E->getLabel()->getID());
+}
+
+Sema::OwningExprResult TemplateExprInstantiator::VisitStmtExpr(StmtExpr *E) {
+ Sema::OwningStmtResult SubStmt
+ = SemaRef.InstantiateCompoundStmt(E->getSubStmt(), TemplateArgs, true);
+ if (SubStmt.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnStmtExpr(E->getLParenLoc(), move(SubStmt),
+ E->getRParenLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) {
+ assert(false && "__builtin_types_compatible_p is not legal in C++");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ ASTOwningVector<&ActionBase::DeleteExpr> SubExprs(SemaRef);
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I) {
+ OwningExprResult SubExpr = Visit(E->getExpr(I));
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ SubExprs.push_back(SubExpr.takeAs<Expr>());
+ }
+
+ // Find the declaration for __builtin_shufflevector
+ const IdentifierInfo &Name
+ = SemaRef.Context.Idents.get("__builtin_shufflevector");
+ TranslationUnitDecl *TUDecl = SemaRef.Context.getTranslationUnitDecl();
+ DeclContext::lookup_result Lookup
+ = TUDecl->lookup(SemaRef.Context, DeclarationName(&Name));
+ assert(Lookup.first != Lookup.second && "No __builtin_shufflevector?");
+
+ // Build a reference to the __builtin_shufflevector builtin
+ FunctionDecl *Builtin = cast<FunctionDecl>(*Lookup.first);
+ Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, Builtin->getType(),
+ E->getBuiltinLoc(),
+ false, false);
+ SemaRef.UsualUnaryConversions(Callee);
+
+ // Build the CallExpr
+ CallExpr *TheCall = new (SemaRef.Context) CallExpr(SemaRef.Context, Callee,
+ SubExprs.takeAs<Expr>(),
+ SubExprs.size(),
+ Builtin->getResultType(),
+ E->getRParenLoc());
+ OwningExprResult OwnedCall(SemaRef.Owned(TheCall));
+
+ // Type-check the __builtin_shufflevector expression.
+ OwningExprResult Result = SemaRef.SemaBuiltinShuffleVector(TheCall);
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ OwnedCall.release();
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitChooseExpr(ChooseExpr *E) {
+ OwningExprResult Cond = Visit(E->getCond());
+ if (Cond.isInvalid())
+ return SemaRef.ExprError();
+
+ OwningExprResult LHS = SemaRef.InstantiateExpr(E->getLHS(), TemplateArgs);
+ if (LHS.isInvalid())
+ return SemaRef.ExprError();
+
+ OwningExprResult RHS = Visit(E->getRHS());
+ if (RHS.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnChooseExpr(E->getBuiltinLoc(),
+ move(Cond), move(LHS), move(RHS),
+ E->getRParenLoc());
+}
+
+Sema::OwningExprResult TemplateExprInstantiator::VisitVAArgExpr(VAArgExpr *E) {
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ SourceLocation FakeTypeLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getSubExpr()->getSourceRange()
+ .getEnd());
+ QualType T = SemaRef.InstantiateType(E->getType(), TemplateArgs,
+ /*FIXME:*/FakeTypeLoc,
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnVAArg(E->getBuiltinLoc(), move(SubExpr),
+ T.getAsOpaquePtr(), E->getRParenLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitInitListExpr(InitListExpr *E) {
+ ASTOwningVector<&ActionBase::DeleteExpr, 4> Inits(SemaRef);
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I) {
+ OwningExprResult Init = Visit(E->getInit(I));
+ if (Init.isInvalid())
+ return SemaRef.ExprError();
+ Inits.push_back(Init.takeAs<Expr>());
+ }
+
+ return SemaRef.ActOnInitList(E->getLBraceLoc(), move_arg(Inits),
+ E->getRBraceLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ Designation Desig;
+
+ // Instantiate the initializer value
+ OwningExprResult Init = Visit(E->getInit());
+ if (Init.isInvalid())
+ return SemaRef.ExprError();
+
+ // Instantiate the designators.
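+  // For example, a GNU-style designated initializer such as (illustrative)
+  //   { .x = 1, [2] = 3, [4 ... 6] = 0 }
+  // contains field, array, and array-range designators, each handled
+  // below.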
+ ASTOwningVector<&ActionBase::DeleteExpr, 4> ArrayExprs(SemaRef);
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ Desig.AddDesignator(Designator::getField(D->getFieldName(),
+ D->getDotLoc(),
+ D->getFieldLoc()));
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ OwningExprResult Index = Visit(E->getArrayIndex(*D));
+ if (Index.isInvalid())
+ return SemaRef.ExprError();
+
+ Desig.AddDesignator(Designator::getArray(Index.get(),
+ D->getLBracketLoc()));
+
+ ArrayExprs.push_back(Index.release());
+ continue;
+ }
+
+ assert(D->isArrayRangeDesignator() && "New kind of designator?");
+ OwningExprResult Start = Visit(E->getArrayRangeStart(*D));
+ if (Start.isInvalid())
+ return SemaRef.ExprError();
+
+ OwningExprResult End = Visit(E->getArrayRangeEnd(*D));
+ if (End.isInvalid())
+ return SemaRef.ExprError();
+
+ Desig.AddDesignator(Designator::getArrayRange(Start.get(),
+ End.get(),
+ D->getLBracketLoc(),
+ D->getEllipsisLoc()));
+
+ ArrayExprs.push_back(Start.release());
+ ArrayExprs.push_back(End.release());
+ }
+
+ OwningExprResult Result =
+ SemaRef.ActOnDesignatedInitializer(Desig,
+ E->getEqualOrColonLoc(),
+ E->usesGNUSyntax(),
+ move(Init));
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ ArrayExprs.take();
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitImplicitValueInitExpr(
+ ImplicitValueInitExpr *E) {
+ assert(!E->isTypeDependent() && !E->isValueDependent() &&
+ "ImplicitValueInitExprs are never dependent");
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ OwningExprResult Base = Visit(E->getBase());
+ if (Base.isInvalid())
+ return SemaRef.ExprError();
+
+ SourceLocation FakeOperatorLoc =
+ SemaRef.PP.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd());
+ return SemaRef.ActOnMemberReferenceExpr(/*Scope=*/0,
+ move(Base),
+ /*FIXME*/FakeOperatorLoc,
+ tok::period,
+ E->getAccessorLoc(),
+ E->getAccessor(),
+ /*FIXME?*/Sema::DeclPtrTy::make((Decl*)0));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitBlockExpr(BlockExpr *E) {
+ assert(false && "FIXME:Template instantiation for blocks is unimplemented");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+ assert(false && "FIXME:Template instantiation for blocks is unimplemented");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+ bool isSizeOf = E->isSizeOf();
+
+ if (E->isArgumentType()) {
+ QualType T = E->getArgumentType();
+ if (T->isDependentType()) {
+ T = SemaRef.InstantiateType(T, TemplateArgs,
+ /*FIXME*/E->getOperatorLoc(),
+ &SemaRef.PP.getIdentifierTable().get("sizeof"));
+ if (T.isNull())
+ return SemaRef.ExprError();
+ }
+
+ return SemaRef.CreateSizeOfAlignOfExpr(T, E->getOperatorLoc(), isSizeOf,
+ E->getSourceRange());
+ }
+
+ Sema::OwningExprResult Arg = Visit(E->getArgumentExpr());
+ if (Arg.isInvalid())
+ return SemaRef.ExprError();
+
+ Sema::OwningExprResult Result
+ = SemaRef.CreateSizeOfAlignOfExpr((Expr *)Arg.get(), E->getOperatorLoc(),
+ isSizeOf, E->getSourceRange());
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ Arg.release();
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitUnresolvedDeclRefExpr(UnresolvedDeclRefExpr *E) {
+ NestedNameSpecifier *NNS
+ = SemaRef.InstantiateNestedNameSpecifier(E->getQualifier(),
+ E->getQualifierRange(),
+ TemplateArgs);
+ if (!NNS)
+ return SemaRef.ExprError();
+
+ CXXScopeSpec SS;
+ SS.setRange(E->getQualifierRange());
+ SS.setScopeRep(NNS);
+
+ // FIXME: We're passing in a NULL scope, because
+ // ActOnDeclarationNameExpr doesn't actually use the scope when we
+ // give it a non-empty scope specifier. Investigate whether it would
+ // be better to refactor ActOnDeclarationNameExpr.
+ return SemaRef.ActOnDeclarationNameExpr(/*Scope=*/0, E->getLocation(),
+ E->getDeclName(),
+ /*HasTrailingLParen=*/false,
+ &SS,
+ /*FIXME:isAddressOfOperand=*/false);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXTemporaryObjectExpr(
+ CXXTemporaryObjectExpr *E) {
+ QualType T = E->getType();
+ if (T->isDependentType()) {
+ T = SemaRef.InstantiateType(T, TemplateArgs,
+ E->getTypeBeginLoc(), DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+ }
+
+ ASTOwningVector<&ActionBase::DeleteExpr> Args(SemaRef);
+ Args.reserve(E->getNumArgs());
+ for (CXXTemporaryObjectExpr::arg_iterator Arg = E->arg_begin(),
+ ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ OwningExprResult InstantiatedArg = Visit(*Arg);
+ if (InstantiatedArg.isInvalid())
+ return SemaRef.ExprError();
+
+ Args.push_back((Expr *)InstantiatedArg.release());
+ }
+
+ SourceLocation CommaLoc;
+ // FIXME: HACK!
+ if (Args.size() > 1) {
+ Expr *First = (Expr *)Args[0];
+ CommaLoc
+ = SemaRef.PP.getLocForEndOfToken(First->getSourceRange().getEnd());
+ }
+ return SemaRef.ActOnCXXTypeConstructExpr(SourceRange(E->getTypeBeginLoc()
+ /*, FIXME*/),
+ T.getAsOpaquePtr(),
+ /*FIXME*/E->getTypeBeginLoc(),
+ move_arg(Args),
+ /*HACK*/&CommaLoc,
+ E->getSourceRange().getEnd());
+}
+
+Sema::OwningExprResult TemplateExprInstantiator::VisitCastExpr(CastExpr *E) {
+ assert(false && "Cannot instantiate abstract CastExpr");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult TemplateExprInstantiator::VisitImplicitCastExpr(
+ ImplicitCastExpr *E) {
+ assert(!E->isTypeDependent() && "Implicit casts must have known types");
+
+ Sema::OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ ImplicitCastExpr *ICE =
+ new (SemaRef.Context) ImplicitCastExpr(E->getType(),
+ (Expr *)SubExpr.release(),
+ E->isLvalueCast());
+ return SemaRef.Owned(ICE);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ assert(false && "Cannot instantiate abstract ExplicitCastExpr");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ // Instantiate the type that we're casting to.
+ SourceLocation TypeStartLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getLParenLoc());
+ QualType ExplicitTy = SemaRef.InstantiateType(E->getTypeAsWritten(),
+ TemplateArgs,
+ TypeStartLoc,
+ DeclarationName());
+ if (ExplicitTy.isNull())
+ return SemaRef.ExprError();
+
+ // Instantiate the subexpression.
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnCastExpr(E->getLParenLoc(),
+ ExplicitTy.getAsOpaquePtr(),
+ E->getRParenLoc(),
+ move(SubExpr));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ return VisitCallExpr(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ // Figure out which cast operator we're dealing with.
+ tok::TokenKind Kind;
+ switch (E->getStmtClass()) {
+ case Stmt::CXXStaticCastExprClass:
+ Kind = tok::kw_static_cast;
+ break;
+
+ case Stmt::CXXDynamicCastExprClass:
+ Kind = tok::kw_dynamic_cast;
+ break;
+
+ case Stmt::CXXReinterpretCastExprClass:
+ Kind = tok::kw_reinterpret_cast;
+ break;
+
+ case Stmt::CXXConstCastExprClass:
+ Kind = tok::kw_const_cast;
+ break;
+
+ default:
+ assert(false && "Invalid C++ named cast");
+ return SemaRef.ExprError();
+ }
+
+ // Instantiate the type that we're casting to.
+ SourceLocation TypeStartLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc());
+ QualType ExplicitTy = SemaRef.InstantiateType(E->getTypeAsWritten(),
+ TemplateArgs,
+ TypeStartLoc,
+ DeclarationName());
+ if (ExplicitTy.isNull())
+ return SemaRef.ExprError();
+
+ // Instantiate the subexpression.
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ SourceLocation FakeLAngleLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc());
+ SourceLocation FakeRAngleLoc = E->getSubExpr()->getSourceRange().getBegin();
+ SourceLocation FakeRParenLoc
+ = SemaRef.PP.getLocForEndOfToken(
+ E->getSubExpr()->getSourceRange().getEnd());
+ return SemaRef.ActOnCXXNamedCast(E->getOperatorLoc(), Kind,
+ /*FIXME:*/FakeLAngleLoc,
+ ExplicitTy.getAsOpaquePtr(),
+ /*FIXME:*/FakeRAngleLoc,
+ /*FIXME:*/FakeRAngleLoc,
+ move(SubExpr),
+ /*FIXME:*/FakeRParenLoc);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXReinterpretCastExpr(
+ CXXReinterpretCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXThisExpr(CXXThisExpr *E) {
+ QualType ThisType =
+ cast<CXXMethodDecl>(SemaRef.CurContext)->getThisType(SemaRef.Context);
+
+ CXXThisExpr *TE =
+ new (SemaRef.Context) CXXThisExpr(E->getLocStart(), ThisType);
+
+ return SemaRef.Owned(TE);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ if (E->isTypeOperand()) {
+ QualType T = SemaRef.InstantiateType(E->getTypeOperand(),
+ TemplateArgs,
+ /*FIXME*/E->getSourceRange().getBegin(),
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnCXXTypeid(E->getSourceRange().getBegin(),
+ /*FIXME*/E->getSourceRange().getBegin(),
+ true, T.getAsOpaquePtr(),
+ E->getSourceRange().getEnd());
+ }
+
+ OwningExprResult Operand = Visit(E->getExprOperand());
+ if (Operand.isInvalid())
+ return SemaRef.ExprError();
+
+ OwningExprResult Result
+ = SemaRef.ActOnCXXTypeid(E->getSourceRange().getBegin(),
+ /*FIXME*/E->getSourceRange().getBegin(),
+ false, Operand.get(),
+ E->getSourceRange().getEnd());
+ if (Result.isInvalid())
+ return SemaRef.ExprError();
+
+ Operand.release(); // FIXME: since ActOnCXXTypeid silently took ownership
+ return move(Result);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ OwningExprResult SubExpr(SemaRef, (void *)0);
+ if (E->getSubExpr()) {
+ SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+ }
+
+ return SemaRef.ActOnCXXThrow(E->getThrowLoc(), move(SubExpr));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ assert(false &&
+ "FIXME: Instantiation for default arguments is unimplemented");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXBindTemporaryExpr(
+ CXXBindTemporaryExpr *E) {
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.MaybeBindToTemporary(SubExpr.takeAs<Expr>());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ assert(!cast<CXXRecordDecl>(E->getConstructor()->getDeclContext())
+ ->isDependentType() && "Dependent constructor shouldn't be here");
+
+ QualType T = SemaRef.InstantiateType(E->getType(), TemplateArgs,
+ /*FIXME*/E->getSourceRange().getBegin(),
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ ASTOwningVector<&ActionBase::DeleteExpr> Args(SemaRef);
+ for (CXXConstructExpr::arg_iterator Arg = E->arg_begin(),
+ ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ OwningExprResult ArgInst = Visit(*Arg);
+ if (ArgInst.isInvalid())
+ return SemaRef.ExprError();
+
+ Args.push_back(ArgInst.takeAs<Expr>());
+ }
+
+ return SemaRef.Owned(CXXConstructExpr::Create(SemaRef.Context, T,
+ E->getConstructor(),
+ E->isElidable(),
+ Args.takeAs<Expr>(),
+ Args.size()));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXFunctionalCastExpr(
+ CXXFunctionalCastExpr *E) {
+ // Instantiate the type that we're casting to.
+ QualType ExplicitTy = SemaRef.InstantiateType(E->getTypeAsWritten(),
+ TemplateArgs,
+ E->getTypeBeginLoc(),
+ DeclarationName());
+ if (ExplicitTy.isNull())
+ return SemaRef.ExprError();
+
+ // Instantiate the subexpression.
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ // FIXME: The end of the type's source range is wrong
+ Expr *Sub = SubExpr.takeAs<Expr>();
+ return SemaRef.ActOnCXXTypeConstructExpr(SourceRange(E->getTypeBeginLoc()),
+ ExplicitTy.getAsOpaquePtr(),
+ /*FIXME:*/E->getTypeBeginLoc(),
+ Sema::MultiExprArg(SemaRef,
+ (void **)&Sub,
+ 1),
+ 0,
+ E->getRParenLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+ return SemaRef.Clone(E);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXNewExpr(CXXNewExpr *E) {
+ // Instantiate the type that we're allocating
+ QualType AllocType = SemaRef.InstantiateType(E->getAllocatedType(),
+ TemplateArgs,
+ /*FIXME:*/E->getSourceRange().getBegin(),
+ DeclarationName());
+ if (AllocType.isNull())
+ return SemaRef.ExprError();
+
+ // Instantiate the size of the array we're allocating (if any).
+ OwningExprResult ArraySize = SemaRef.InstantiateExpr(E->getArraySize(),
+ TemplateArgs);
+ if (ArraySize.isInvalid())
+ return SemaRef.ExprError();
+
+ // Instantiate the placement arguments (if any).
+ ASTOwningVector<&ActionBase::DeleteExpr> PlacementArgs(SemaRef);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ OwningExprResult Arg = Visit(E->getPlacementArg(I));
+ if (Arg.isInvalid())
+ return SemaRef.ExprError();
+
+ PlacementArgs.push_back(Arg.take());
+ }
+
+ // Instantiate the constructor arguments (if any).
+ ASTOwningVector<&ActionBase::DeleteExpr> ConstructorArgs(SemaRef);
+ for (unsigned I = 0, N = E->getNumConstructorArgs(); I != N; ++I) {
+ OwningExprResult Arg = Visit(E->getConstructorArg(I));
+ if (Arg.isInvalid())
+ return SemaRef.ExprError();
+
+ ConstructorArgs.push_back(Arg.take());
+ }
+
+ return SemaRef.BuildCXXNew(E->getSourceRange().getBegin(),
+ E->isGlobalNew(),
+ /*FIXME*/SourceLocation(),
+ move_arg(PlacementArgs),
+ /*FIXME*/SourceLocation(),
+ E->isParenTypeId(),
+ AllocType,
+ /*FIXME*/E->getSourceRange().getBegin(),
+ SourceRange(),
+ move(ArraySize),
+ /*FIXME*/SourceLocation(),
+ Sema::MultiExprArg(SemaRef,
+ ConstructorArgs.take(),
+ ConstructorArgs.size()),
+ E->getSourceRange().getEnd());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ OwningExprResult Operand = Visit(E->getArgument());
+ if (Operand.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnCXXDelete(E->getSourceRange().getBegin(),
+ E->isGlobalDelete(),
+ E->isArrayForm(),
+ move(Operand));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ QualType T = SemaRef.InstantiateType(E->getQueriedType(), TemplateArgs,
+ /*FIXME*/E->getSourceRange().getBegin(),
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ SourceLocation FakeLParenLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getSourceRange().getBegin());
+ return SemaRef.ActOnUnaryTypeTrait(E->getTrait(),
+ E->getSourceRange().getBegin(),
+ /*FIXME*/FakeLParenLoc,
+ T.getAsOpaquePtr(),
+ E->getSourceRange().getEnd());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitQualifiedDeclRefExpr(QualifiedDeclRefExpr *E) {
+ NestedNameSpecifier *NNS
+ = SemaRef.InstantiateNestedNameSpecifier(E->getQualifier(),
+ E->getQualifierRange(),
+ TemplateArgs);
+ if (!NNS)
+ return SemaRef.ExprError();
+
+ CXXScopeSpec SS;
+ SS.setRange(E->getQualifierRange());
+ SS.setScopeRep(NNS);
+ return SemaRef.ActOnDeclarationNameExpr(/*Scope=*/0,
+ E->getLocation(),
+ E->getDecl()->getDeclName(),
+ /*Trailing lparen=*/false,
+ &SS,
+ /*FIXME:*/false);
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXExprWithTemporaries(
+ CXXExprWithTemporaries *E) {
+ OwningExprResult SubExpr = Visit(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return SemaRef.ExprError();
+
+ return SemaRef.ActOnFinishFullExpr(move(SubExpr));
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *E) {
+ QualType T = SemaRef.InstantiateType(E->getTypeAsWritten(), TemplateArgs,
+ E->getTypeBeginLoc(),
+ DeclarationName());
+ if (T.isNull())
+ return SemaRef.ExprError();
+
+ ASTOwningVector<&ActionBase::DeleteExpr> Args(SemaRef);
+ llvm::SmallVector<SourceLocation, 8> FakeCommaLocs;
+ for (CXXUnresolvedConstructExpr::arg_iterator Arg = E->arg_begin(),
+ ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ OwningExprResult InstArg = Visit(*Arg);
+ if (InstArg.isInvalid())
+ return SemaRef.ExprError();
+
+ FakeCommaLocs.push_back(
+ SemaRef.PP.getLocForEndOfToken((*Arg)->getSourceRange().getEnd()));
+ Args.push_back(InstArg.takeAs<Expr>());
+ }
+
+ // FIXME: The end of the type range isn't exactly correct.
+ // FIXME: we're faking the locations of the commas
+ return SemaRef.ActOnCXXTypeConstructExpr(SourceRange(E->getTypeBeginLoc(),
+ E->getLParenLoc()),
+ T.getAsOpaquePtr(),
+ E->getLParenLoc(),
+ move_arg(Args),
+ &FakeCommaLocs.front(),
+ E->getRParenLoc());
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitCXXUnresolvedMemberExpr(
+ CXXUnresolvedMemberExpr *E) {
+ // Instantiate the base of the expression.
+ OwningExprResult Base = Visit(E->getBase());
+ if (Base.isInvalid())
+ return SemaRef.ExprError();
+
+ // FIXME: Instantiate the declaration name.
+ return SemaRef.ActOnMemberReferenceExpr(/*Scope=*/0,
+ move(Base), E->getOperatorLoc(),
+ E->isArrow()? tok::arrow
+ : tok::period,
+ E->getMemberLoc(),
+ /*FIXME:*/*E->getMember().getAsIdentifierInfo(),
+ /*FIXME?*/Sema::DeclPtrTy::make((Decl*)0));
+}
+
+//----------------------------------------------------------------------------
+// Objective-C Expressions
+//----------------------------------------------------------------------------
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+TemplateExprInstantiator::VisitObjCSuperExpr(ObjCSuperExpr *E) {
+ assert(false && "FIXME: Template instantiations for ObjC expressions");
+ return SemaRef.ExprError();
+}
+
+Sema::OwningExprResult
+Sema::InstantiateExpr(Expr *E, const TemplateArgumentList &TemplateArgs) {
+ if (!E)
+ return Owned((Expr *)0);
+
+ TemplateExprInstantiator Instantiator(*this, TemplateArgs);
+ return Instantiator.Visit(E);
+}
diff --git a/lib/Sema/SemaTemplateInstantiateStmt.cpp b/lib/Sema/SemaTemplateInstantiateStmt.cpp
new file mode 100644
index 0000000..1f69479
--- /dev/null
+++ b/lib/Sema/SemaTemplateInstantiateStmt.cpp
@@ -0,0 +1,443 @@
+//===--- SemaTemplateInstantiateStmt.cpp - C++ Template Stmt Instantiation ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template instantiation for statements.
+//
+//===----------------------------------------------------------------------===//
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Parse/DeclSpec.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+
+namespace {
+ class VISIBILITY_HIDDEN TemplateStmtInstantiator
+ : public StmtVisitor<TemplateStmtInstantiator, Sema::OwningStmtResult> {
+ Sema &SemaRef;
+ const TemplateArgumentList &TemplateArgs;
+
+ template<typename T>
+ Sema::FullExprArg FullExpr(T &expr) {
+ return SemaRef.FullExpr(expr);
+ }
+
+ public:
+ typedef Sema::OwningExprResult OwningExprResult;
+ typedef Sema::OwningStmtResult OwningStmtResult;
+
+ TemplateStmtInstantiator(Sema &SemaRef,
+ const TemplateArgumentList &TemplateArgs)
+ : SemaRef(SemaRef), TemplateArgs(TemplateArgs) { }
+
+ // Declare VisitXXXStmt nodes for all of the statement kinds.
+#define STMT(Type, Base) OwningStmtResult Visit##Type(Type *S);
+#define EXPR(Type, Base)
+#include "clang/AST/StmtNodes.def"
+
+ // Visit an expression (which will use the expression
+ // instantiator).
+ OwningStmtResult VisitExpr(Expr *E);
+
+ // Base case: any statement kind not handled above cannot be instantiated.
+ OwningStmtResult VisitStmt(Stmt *S) {
+ S->dump();
+ assert(false && "Cannot instantiate this kind of statement");
+ return SemaRef.StmtError();
+ }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Common/C statements
+//===----------------------------------------------------------------------===//
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitDeclStmt(DeclStmt *S) {
+ llvm::SmallVector<Decl *, 4> Decls;
+ for (DeclStmt::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D) {
+ Decl *Instantiated = SemaRef.InstantiateDecl(*D, SemaRef.CurContext,
+ TemplateArgs);
+ if (!Instantiated)
+ return SemaRef.StmtError();
+
+ Decls.push_back(Instantiated);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(*D, Instantiated);
+ }
+
+ return SemaRef.Owned(new (SemaRef.Context) DeclStmt(
+ DeclGroupRef::Create(SemaRef.Context,
+ &Decls[0],
+ Decls.size()),
+ S->getStartLoc(),
+ S->getEndLoc()));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitNullStmt(NullStmt *S) {
+ return SemaRef.Owned(S->Clone(SemaRef.Context));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitLabelStmt(LabelStmt *S) {
+ OwningStmtResult SubStmt = Visit(S->getSubStmt());
+
+ if (SubStmt.isInvalid())
+ return SemaRef.StmtError();
+
+ // FIXME: Pass the real colon loc in.
+ return SemaRef.ActOnLabelStmt(S->getIdentLoc(), S->getID(), SourceLocation(),
+ move(SubStmt));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitGotoStmt(GotoStmt *S) {
+ return SemaRef.ActOnGotoStmt(S->getGotoLoc(), S->getLabelLoc(),
+ S->getLabel()->getID());
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ OwningExprResult Target = SemaRef.InstantiateExpr(S->getTarget(),
+ TemplateArgs);
+ if (Target.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnIndirectGotoStmt(S->getGotoLoc(), S->getStarLoc(),
+ move(Target));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitBreakStmt(BreakStmt *S) {
+ return SemaRef.Owned(S->Clone(SemaRef.Context));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitContinueStmt(ContinueStmt *S) {
+ return SemaRef.Owned(S->Clone(SemaRef.Context));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitReturnStmt(ReturnStmt *S) {
+ Sema::OwningExprResult Result = SemaRef.ExprEmpty();
+ if (Expr *E = S->getRetValue()) {
+ Result = SemaRef.InstantiateExpr(E, TemplateArgs);
+
+ if (Result.isInvalid())
+ return SemaRef.StmtError();
+ }
+
+ return SemaRef.ActOnReturnStmt(S->getReturnLoc(), FullExpr(Result));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitCompoundStmt(CompoundStmt *S) {
+ return SemaRef.InstantiateCompoundStmt(S, TemplateArgs, false);
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitSwitchCase(SwitchCase *S) {
+ assert(false && "SwitchCase statements are never directly instantiated");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitCaseStmt(CaseStmt *S) {
+ // Instantiate left-hand case value.
+ OwningExprResult LHS = SemaRef.InstantiateExpr(S->getLHS(), TemplateArgs);
+ if (LHS.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate right-hand case value (for the GNU case-range extension).
+ OwningExprResult RHS = SemaRef.InstantiateExpr(S->getRHS(), TemplateArgs);
+ if (RHS.isInvalid())
+ return SemaRef.StmtError();
+
+ // Build the case statement.
+ OwningStmtResult Case = SemaRef.ActOnCaseStmt(S->getCaseLoc(),
+ move(LHS),
+ S->getEllipsisLoc(),
+ move(RHS),
+ S->getColonLoc());
+ if (Case.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the statement following the case
+ OwningStmtResult SubStmt = SemaRef.InstantiateStmt(S->getSubStmt(),
+ TemplateArgs);
+ if (SubStmt.isInvalid())
+ return SemaRef.StmtError();
+
+ SemaRef.ActOnCaseStmtBody(Case.get(), move(SubStmt));
+ return move(Case);
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitDefaultStmt(DefaultStmt *S) {
+ // Instantiate the statement following the default case
+ OwningStmtResult SubStmt = SemaRef.InstantiateStmt(S->getSubStmt(),
+ TemplateArgs);
+ if (SubStmt.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnDefaultStmt(S->getDefaultLoc(),
+ S->getColonLoc(),
+ move(SubStmt),
+ /*CurScope=*/0);
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitIfStmt(IfStmt *S) {
+ // Instantiate the condition
+ OwningExprResult Cond = SemaRef.InstantiateExpr(S->getCond(), TemplateArgs);
+ if (Cond.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the "then" branch.
+ OwningStmtResult Then = SemaRef.InstantiateStmt(S->getThen(), TemplateArgs);
+ if (Then.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the "else" branch.
+ OwningStmtResult Else = SemaRef.InstantiateStmt(S->getElse(), TemplateArgs);
+ if (Else.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnIfStmt(S->getIfLoc(), FullExpr(Cond), move(Then),
+ S->getElseLoc(), move(Else));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitSwitchStmt(SwitchStmt *S) {
+ // Instantiate the condition.
+ OwningExprResult Cond = SemaRef.InstantiateExpr(S->getCond(), TemplateArgs);
+ if (Cond.isInvalid())
+ return SemaRef.StmtError();
+
+ // Start the switch statement itself.
+ OwningStmtResult Switch = SemaRef.ActOnStartOfSwitchStmt(move(Cond));
+ if (Switch.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the body of the switch statement.
+ OwningStmtResult Body = SemaRef.InstantiateStmt(S->getBody(), TemplateArgs);
+ if (Body.isInvalid())
+ return SemaRef.StmtError();
+
+ // Complete the switch statement.
+ return SemaRef.ActOnFinishSwitchStmt(S->getSwitchLoc(), move(Switch),
+ move(Body));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitWhileStmt(WhileStmt *S) {
+ // Instantiate the condition
+ OwningExprResult Cond = SemaRef.InstantiateExpr(S->getCond(), TemplateArgs);
+ if (Cond.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the body
+ OwningStmtResult Body = SemaRef.InstantiateStmt(S->getBody(), TemplateArgs);
+ if (Body.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnWhileStmt(S->getWhileLoc(), FullExpr(Cond), move(Body));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitDoStmt(DoStmt *S) {
+ // Instantiate the condition
+ OwningExprResult Cond = SemaRef.InstantiateExpr(S->getCond(), TemplateArgs);
+ if (Cond.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the body
+ OwningStmtResult Body = SemaRef.InstantiateStmt(S->getBody(), TemplateArgs);
+ if (Body.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnDoStmt(S->getDoLoc(), move(Body), S->getWhileLoc(),
+ move(Cond));
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitForStmt(ForStmt *S) {
+ // Instantiate the initialization statement
+ OwningStmtResult Init = SemaRef.InstantiateStmt(S->getInit(), TemplateArgs);
+ if (Init.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the condition
+ OwningExprResult Cond = SemaRef.InstantiateExpr(S->getCond(), TemplateArgs);
+ if (Cond.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the increment
+ OwningExprResult Inc = SemaRef.InstantiateExpr(S->getInc(), TemplateArgs);
+ if (Inc.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the body
+ OwningStmtResult Body = SemaRef.InstantiateStmt(S->getBody(), TemplateArgs);
+ if (Body.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.ActOnForStmt(S->getForLoc(), S->getLParenLoc(),
+ move(Init), move(Cond), move(Inc),
+ S->getRParenLoc(), move(Body));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitAsmStmt(AsmStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an 'asm' statement");
+ return SemaRef.StmtError();
+}
+
+//===----------------------------------------------------------------------===//
+// C++ statements
+//===----------------------------------------------------------------------===//
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitCXXTryStmt(CXXTryStmt *S) {
+ // Instantiate the try block itself.
+ OwningStmtResult TryBlock = VisitCompoundStmt(S->getTryBlock());
+ if (TryBlock.isInvalid())
+ return SemaRef.StmtError();
+
+ // Instantiate the handlers.
+ ASTOwningVector<&ActionBase::DeleteStmt> Handlers(SemaRef);
+ for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) {
+ OwningStmtResult Handler = VisitCXXCatchStmt(S->getHandler(I));
+ if (Handler.isInvalid())
+ return SemaRef.StmtError();
+
+ Handlers.push_back(Handler.takeAs<Stmt>());
+ }
+
+ return SemaRef.ActOnCXXTryBlock(S->getTryLoc(), move(TryBlock),
+ move_arg(Handlers));
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ // Instantiate the exception declaration, if any.
+ VarDecl *Var = 0;
+ if (S->getExceptionDecl()) {
+ VarDecl *ExceptionDecl = S->getExceptionDecl();
+ QualType T = SemaRef.InstantiateType(ExceptionDecl->getType(),
+ TemplateArgs,
+ ExceptionDecl->getLocation(),
+ ExceptionDecl->getDeclName());
+ if (T.isNull())
+ return SemaRef.StmtError();
+
+ Var = SemaRef.BuildExceptionDeclaration(0, T,
+ ExceptionDecl->getIdentifier(),
+ ExceptionDecl->getLocation(),
+ /*FIXME: Inaccurate*/
+ SourceRange(ExceptionDecl->getLocation()));
+ if (Var->isInvalidDecl()) {
+ Var->Destroy(SemaRef.Context);
+ return SemaRef.StmtError();
+ }
+
+ // Introduce the exception declaration into scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
+ }
+
+ // Instantiate the actual exception handler.
+ OwningStmtResult Handler = Visit(S->getHandlerBlock());
+ if (Handler.isInvalid()) {
+ if (Var)
+ Var->Destroy(SemaRef.Context);
+ return SemaRef.StmtError();
+ }
+
+ return SemaRef.Owned(new (SemaRef.Context) CXXCatchStmt(S->getCatchLoc(),
+ Var,
+ Handler.takeAs<Stmt>()));
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C statements
+//===----------------------------------------------------------------------===//
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C @finally statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCAtSynchronizedStmt(
+ ObjCAtSynchronizedStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C @synchronized statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C @try statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCForCollectionStmt(
+ ObjCForCollectionStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C \"for\" statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C @throw statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult
+TemplateStmtInstantiator::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ // FIXME: Implement this
+ assert(false && "Cannot instantiate an Objective-C @catch statement");
+ return SemaRef.StmtError();
+}
+
+Sema::OwningStmtResult TemplateStmtInstantiator::VisitExpr(Expr *E) {
+ Sema::OwningExprResult Result = SemaRef.InstantiateExpr(E, TemplateArgs);
+ if (Result.isInvalid())
+ return SemaRef.StmtError();
+
+ return SemaRef.Owned(Result.takeAs<Stmt>());
+}
+
+Sema::OwningStmtResult
+Sema::InstantiateStmt(Stmt *S, const TemplateArgumentList &TemplateArgs) {
+ if (!S)
+ return Owned((Stmt *)0);
+
+ TemplateStmtInstantiator Instantiator(*this, TemplateArgs);
+ return Instantiator.Visit(S);
+}
+
+Sema::OwningStmtResult
+Sema::InstantiateCompoundStmt(CompoundStmt *S,
+ const TemplateArgumentList &TemplateArgs,
+ bool isStmtExpr) {
+ if (!S)
+ return Owned((Stmt *)0);
+
+ TemplateStmtInstantiator Instantiator(*this, TemplateArgs);
+ ASTOwningVector<&ActionBase::DeleteStmt> Statements(*this);
+ for (CompoundStmt::body_iterator B = S->body_begin(), BEnd = S->body_end();
+ B != BEnd; ++B) {
+ OwningStmtResult Result = Instantiator.Visit(*B);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Statements.push_back(Result.takeAs<Stmt>());
+ }
+
+ return ActOnCompoundStmt(S->getLBracLoc(), S->getRBracLoc(),
+ move_arg(Statements), isStmtExpr);
+}
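+
+// Illustrative example (hypothetical template, for exposition only): for
+//
+//   template<typename T> T smaller(T a, T b) {
+//     T m = a;
+//     if (b < m) m = b;
+//     return m;
+//   }
+//
+// InstantiateCompoundStmt visits each child statement in order: the DeclStmt
+// re-instantiates the declaration of "m", the IfStmt re-instantiates its
+// condition and then branch via InstantiateExpr/InstantiateStmt, and the
+// ReturnStmt rebuilds its return value, yielding a non-dependent body for,
+// e.g., smaller<int>.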
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
new file mode 100644
index 0000000..81ac211
--- /dev/null
+++ b/lib/Sema/SemaType.cpp
@@ -0,0 +1,1301 @@
+//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related semantic analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/Parse/DeclSpec.h"
+using namespace clang;
+
+/// \brief Perform adjustment on the parameter type of a function.
+///
+/// This routine adjusts the given parameter type @p T to the actual
+/// parameter type used by semantic analysis (C99 6.7.5.3p[7,8],
+/// C++ [dcl.fct]p3). The adjusted parameter type is returned.
+QualType Sema::adjustParameterType(QualType T) {
+ // C99 6.7.5.3p7:
+ if (T->isArrayType()) {
+ // C99 6.7.5.3p7:
+ // A declaration of a parameter as "array of type" shall be
+ // adjusted to "qualified pointer to type", where the type
+ // qualifiers (if any) are those specified within the [ and ] of
+ // the array type derivation.
+ return Context.getArrayDecayedType(T);
+ } else if (T->isFunctionType())
+ // C99 6.7.5.3p8:
+ // A declaration of a parameter as "function returning type"
+ // shall be adjusted to "pointer to function returning type", as
+ // in 6.3.2.1.
+ return Context.getPointerType(T);
+
+ return T;
+}
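+
+// Illustrative example (hypothetical declaration, for exposition only): for
+//
+//   void f(int a[10], int cb(double));
+//
+// adjustParameterType() turns the parameter type "int[10]" into "int *" and
+// the parameter type "int (double)" into "int (*)(double)", matching
+// C99 6.7.5.3p7-8.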
+
+/// \brief Convert the specified declspec to the appropriate type
+/// object.
+/// \param DS the declaration specifiers
+/// \param DeclLoc The location of the declarator identifier or invalid if none.
+/// \returns The type described by the declaration specifiers. This function
+/// never returns null.
+QualType Sema::ConvertDeclSpecToType(const DeclSpec &DS,
+ SourceLocation DeclLoc,
+ bool &isInvalid) {
+ // FIXME: Should move the logic from DeclSpec::Finish to here for validity
+ // checking.
+ QualType Result;
+
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_void:
+ Result = Context.VoidTy;
+ break;
+ case DeclSpec::TST_char:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.CharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
+ Result = Context.SignedCharTy;
+ else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ Result = Context.UnsignedCharTy;
+ }
+ break;
+ case DeclSpec::TST_wchar:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.WCharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
+ Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType());
+ Result = Context.getSignedWCharType();
+ } else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType());
+ Result = Context.getUnsignedWCharType();
+ }
+ break;
+ case DeclSpec::TST_unspecified:
+ // "<proto1,proto2>" is an objc qualified ID with a missing id.
+ if (DeclSpec::ProtocolQualifierListTy PQ = DS.getProtocolQualifiers()) {
+ Result = Context.getObjCQualifiedIdType((ObjCProtocolDecl**)PQ,
+ DS.getNumProtocolQualifiers());
+ break;
+ }
+
+ // Unspecified typespec defaults to int in C90. However, the C90 grammar
+ // [C90 6.5] only allows a decl-spec if there was *some* type-specifier,
+ // type-qualifier, or storage-class-specifier. If not, emit an extwarn.
+ // Note that the one exception to this is function definitions, which are
+ // allowed to be completely missing a declspec. This is handled in the
+ // parser already though by it pretending to have seen an 'int' in this
+ // case.
+ if (getLangOptions().ImplicitInt) {
+ // In C89 mode, we only warn if there is a completely missing declspec
+ // when one is not allowed.
+ if (DS.isEmpty()) {
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getSourceRange().getBegin();
+ Diag(DeclLoc, diag::warn_missing_declspec)
+ << DS.getSourceRange()
+ << CodeModificationHint::CreateInsertion(DS.getSourceRange().getBegin(),
+ "int");
+ }
+ } else if (!DS.hasTypeSpecifier()) {
+ // C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
+ // "At least one type specifier shall be given in the declaration
+ // specifiers in each declaration, and in the specifier-qualifier list in
+ // each struct declaration and type name."
+ // FIXME: Does Microsoft really have the implicit int extension in C++?
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getSourceRange().getBegin();
+
+ if (getLangOptions().CPlusPlus && !getLangOptions().Microsoft)
+ Diag(DeclLoc, diag::err_missing_type_specifier)
+ << DS.getSourceRange();
+ else
+ Diag(DeclLoc, diag::warn_missing_type_specifier)
+ << DS.getSourceRange();
+
+ // FIXME: If we could guarantee that the result would be well-formed, it
+ // would be useful to have a code insertion hint here. However, after
+ // emitting this warning/error, we often emit other errors.
+ }
+
+ // FALL THROUGH.
+ case DeclSpec::TST_int: {
+ if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.IntTy; break;
+ case DeclSpec::TSW_short: Result = Context.ShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.LongTy; break;
+ case DeclSpec::TSW_longlong: Result = Context.LongLongTy; break;
+ }
+ } else {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break;
+ case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
+ case DeclSpec::TSW_longlong: Result = Context.UnsignedLongLongTy; break;
+ }
+ }
+ break;
+ }
+ case DeclSpec::TST_float: Result = Context.FloatTy; break;
+ case DeclSpec::TST_double:
+ if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
+ Result = Context.LongDoubleTy;
+ else
+ Result = Context.DoubleTy;
+ break;
+ case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
+ case DeclSpec::TST_decimal32: // _Decimal32
+ case DeclSpec::TST_decimal64: // _Decimal64
+ case DeclSpec::TST_decimal128: // _Decimal128
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
+ Result = Context.IntTy;
+ isInvalid = true;
+ break;
+ case DeclSpec::TST_class:
+ case DeclSpec::TST_enum:
+ case DeclSpec::TST_union:
+ case DeclSpec::TST_struct: {
+ Decl *D = static_cast<Decl *>(DS.getTypeRep());
+ assert(D && "Didn't get a decl for a class/enum/union/struct?");
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 &&
+ "Can't handle qualifiers on typedef names yet!");
+ // TypeQuals handled by caller.
+ Result = Context.getTypeDeclType(cast<TypeDecl>(D));
+
+ if (D->isInvalidDecl())
+ isInvalid = true;
+ break;
+ }
+ case DeclSpec::TST_typename: {
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 &&
+ "Can't handle qualifiers on typedef names yet!");
+ Result = QualType::getFromOpaquePtr(DS.getTypeRep());
+
+ if (DeclSpec::ProtocolQualifierListTy PQ = DS.getProtocolQualifiers()) {
+ // FIXME: Adding a TST_objcInterface clause doesn't seem ideal, so we have
+ // this "hack" for now...
+ if (const ObjCInterfaceType *Interface = Result->getAsObjCInterfaceType())
+ Result = Context.getObjCQualifiedInterfaceType(Interface->getDecl(),
+ (ObjCProtocolDecl**)PQ,
+ DS.getNumProtocolQualifiers());
+ else if (Result == Context.getObjCIdType())
+ // id<protocol-list>
+ Result = Context.getObjCQualifiedIdType((ObjCProtocolDecl**)PQ,
+ DS.getNumProtocolQualifiers());
+ else if (Result == Context.getObjCClassType()) {
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getSourceRange().getBegin();
+ // Class<protocol-list>
+ Diag(DeclLoc, diag::err_qualified_class_unsupported)
+ << DS.getSourceRange();
+ } else {
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getSourceRange().getBegin();
+ Diag(DeclLoc, diag::err_invalid_protocol_qualifiers)
+ << DS.getSourceRange();
+ isInvalid = true;
+ }
+ }
+
+ // If this is a reference to an invalid typedef, propagate the invalidity.
+ if (TypedefType *TDT = dyn_cast<TypedefType>(Result))
+ if (TDT->getDecl()->isInvalidDecl())
+ isInvalid = true;
+
+ // TypeQuals handled by caller.
+ break;
+ }
+ case DeclSpec::TST_typeofType:
+ Result = QualType::getFromOpaquePtr(DS.getTypeRep());
+ assert(!Result.isNull() && "Didn't get a type for typeof?");
+ // TypeQuals handled by caller.
+ Result = Context.getTypeOfType(Result);
+ break;
+ case DeclSpec::TST_typeofExpr: {
+ Expr *E = static_cast<Expr *>(DS.getTypeRep());
+ assert(E && "Didn't get an expression for typeof?");
+ // TypeQuals handled by caller.
+ Result = Context.getTypeOfExprType(E);
+ break;
+ }
+ case DeclSpec::TST_error:
+ Result = Context.IntTy;
+ isInvalid = true;
+ break;
+ }
+
+ // Handle complex types.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
+ if (getLangOptions().Freestanding)
+ Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
+ Result = Context.getComplexType(Result);
+ }
+
+ assert(DS.getTypeSpecComplex() != DeclSpec::TSC_imaginary &&
+ "FIXME: imaginary types not supported yet!");
+
+ // See if there are any attributes on the declspec that apply to the type (as
+ // opposed to the decl).
+ if (const AttributeList *AL = DS.getAttributes())
+ ProcessTypeAttributeList(Result, AL);
+
+ // Apply const/volatile/restrict qualifiers to T.
+ if (unsigned TypeQuals = DS.getTypeQualifiers()) {
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from object
+ // or incomplete types shall not be restrict-qualified." C++ also allows
+ // restrict-qualified references.
+ if (TypeQuals & QualType::Restrict) {
+ if (Result->isPointerType() || Result->isReferenceType()) {
+ QualType EltTy = Result->isPointerType() ?
+ Result->getAsPointerType()->getPointeeType() :
+ Result->getAsReferenceType()->getPointeeType();
+
+ // If we have a pointer or reference, the pointee must have an object or
+ // incomplete type.
+ if (!EltTy->isIncompleteOrObjectType()) {
+ Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_invalid_pointee)
+ << EltTy << DS.getSourceRange();
+ TypeQuals &= ~QualType::Restrict; // Remove the restrict qualifier.
+ }
+ } else {
+ Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_not_pointer)
+ << Result << DS.getSourceRange();
+ TypeQuals &= ~QualType::Restrict; // Remove the restrict qualifier.
+ }
+ }
+
+ // Warn about CV qualifiers on functions: C99 6.7.3p8: "If the specification
+ // of a function type includes any type qualifiers, the behavior is
+ // undefined."
+ if (Result->isFunctionType() && TypeQuals) {
+ // Get some location to point at, either the C or V location.
+ SourceLocation Loc;
+ if (TypeQuals & QualType::Const)
+ Loc = DS.getConstSpecLoc();
+ else {
+ assert((TypeQuals & QualType::Volatile) &&
+ "Has CV quals but not C or V?");
+ Loc = DS.getVolatileSpecLoc();
+ }
+ Diag(Loc, diag::warn_typecheck_function_qualifiers)
+ << Result << DS.getSourceRange();
+ }
+
+ // C++ [dcl.ref]p1:
+ // Cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef
+ // (7.1.3) or of a template type argument (14.3), in which
+ // case the cv-qualifiers are ignored.
+ // FIXME: Shouldn't we be checking SCS_typedef here?
+ if (DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ TypeQuals && Result->isReferenceType()) {
+ TypeQuals &= ~QualType::Const;
+ TypeQuals &= ~QualType::Volatile;
+ }
+
+ Result = Result.getQualifiedType(TypeQuals);
+ }
+ return Result;
+}
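+
+// Illustrative examples (hypothetical declaration specifiers, for exposition
+// only): "unsigned long" reaches the TST_int case through the fall-through
+// above and yields Context.UnsignedLongTy; "long double" yields
+// Context.LongDoubleTy; "_Complex float" is wrapped by
+// Context.getComplexType(); and in C89 implicit-int mode a declaration that
+// omits the type specifier entirely still defaults to int.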
+
+static std::string getPrintableNameForEntity(DeclarationName Entity) {
+ if (Entity)
+ return Entity.getAsString();
+
+ return "type name";
+}
+
+/// \brief Build a pointer type.
+///
+/// \param T The type to which we'll be building a pointer.
+///
+/// \param Quals The cvr-qualifiers to be applied to the pointer type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// pointer type or, if there is no such entity, the location of the
+/// type that will have pointer type.
+///
+/// \param Entity The name of the entity that involves the pointer
+/// type, if known.
+///
+/// \returns A suitable pointer type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildPointerType(QualType T, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity) {
+ if (T->isReferenceType()) {
+ // C++ 8.3.2p4: There shall be no ... pointers to references ...
+ Diag(Loc, diag::err_illegal_decl_pointer_to_reference)
+ << getPrintableNameForEntity(Entity);
+ return QualType();
+ }
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if ((Quals & QualType::Restrict) && !T->isIncompleteOrObjectType()) {
+ Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
+ << T;
+ Quals &= ~QualType::Restrict;
+ }
+
+ // Build the pointer type.
+ return Context.getPointerType(T).getQualifiedType(Quals);
+}
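+
+// Illustrative example (hypothetical code, for exposition only): given
+//
+//   typedef int &IntRef;
+//   IntRef *p;        // ill-formed: pointer to reference
+//
+// BuildPointerType() emits err_illegal_decl_pointer_to_reference and returns
+// a null QualType, while "int *restrict q;" is accepted because int is an
+// object type and may therefore be restrict-qualified.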
+
+/// \brief Build a reference type.
+///
+/// \param T The type to which we'll be building a reference.
+///
+/// \param Quals The cvr-qualifiers to be applied to the reference type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// reference type or, if there is no such entity, the location of the
+/// type that will have reference type.
+///
+/// \param Entity The name of the entity that involves the reference
+/// type, if known.
+///
+/// \returns A suitable reference type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildReferenceType(QualType T, bool LValueRef, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity) {
+ if (LValueRef) {
+ if (const RValueReferenceType *R = T->getAsRValueReferenceType()) {
+ // C++0x [dcl.typedef]p9: If a typedef TD names a type that is a
+ // reference to a type T, an attempt to create the type "lvalue
+ // reference to cv TD" creates the type "lvalue reference to T".
+ // We use the qualifiers (restrict or none) of the original reference,
+ // not the new ones. This is consistent with GCC.
+ return Context.getLValueReferenceType(R->getPointeeType()).
+ getQualifiedType(T.getCVRQualifiers());
+ }
+ }
+ if (T->isReferenceType()) {
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ //
+ // According to C++ DR 106, references to references are only
+ // diagnosed when they are written directly (e.g., "int & &"),
+ // but not when they happen via a typedef:
+ //
+ // typedef int& intref;
+ // typedef intref& intref2;
+ //
+ // Parser::ParseDeclaratorInternal diagnoses the case where
+ // references are written directly; here, we handle the
+ // collapsing of references-to-references as described in C++
+ // DR 106 and amended by C++ DR 540.
+ return T;
+ }
+
+ // C++ [dcl.ref]p1:
+ // A declarator that specifies the type "reference to cv void"
+ // is ill-formed.
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_reference_to_void);
+ return QualType();
+ }
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if ((Quals & QualType::Restrict) && !T->isIncompleteOrObjectType()) {
+ Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
+ << T;
+ Quals &= ~QualType::Restrict;
+ }
+
+ // C++ [dcl.ref]p1:
+ // [...] Cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef
+ // (7.1.3) or of a template type argument (14.3), in which case
+ // the cv-qualifiers are ignored.
+ //
+ // We diagnose extraneous cv-qualifiers for the non-typedef,
+ // non-template type argument case within the parser. Here, we just
+ // ignore any extraneous cv-qualifiers.
+ Quals &= ~QualType::Const;
+ Quals &= ~QualType::Volatile;
+
+ // Handle restrict on references.
+ if (LValueRef)
+ return Context.getLValueReferenceType(T).getQualifiedType(Quals);
+ return Context.getRValueReferenceType(T).getQualifiedType(Quals);
+}
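+
+// Illustrative example (hypothetical code, for exposition only): with
+//
+//   typedef int  &IntLRef;
+//   typedef int &&IntRRef;
+//   int x;
+//   IntLRef &r1 = x;   // collapses to "int &"
+//   IntRRef &r2 = x;   // also collapses to "int &" (C++0x reference collapsing)
+//
+// BuildReferenceType() folds the extra reference level as described by C++
+// DR 106/540 and silently drops cv-qualifiers that arrive through a typedef
+// or a template type argument.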
+
+/// \brief Build an array type.
+///
+/// \param T The type of each element in the array.
+///
+/// \param ASM C99 array size modifier (e.g., '*', 'static').
+///
+/// \param ArraySize Expression describing the size of the array.
+///
+/// \param Quals The cvr-qualifiers to be applied to the array's
+/// element type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// array type or, if there is no such entity, the location of the
+/// type that will have array type.
+///
+/// \param Entity The name of the entity that involves the array
+/// type, if known.
+///
+/// \returns A suitable array type, if there are no errors. Otherwise,
+/// returns a NULL type.
+QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+ Expr *ArraySize, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity) {
+ // C99 6.7.5.2p1: If the element type is an incomplete or function type,
+ // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
+ if (RequireCompleteType(Loc, T,
+ diag::err_illegal_decl_array_incomplete_type))
+ return QualType();
+
+ if (T->isFunctionType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_functions)
+ << getPrintableNameForEntity(Entity);
+ return QualType();
+ }
+
+ // C++ 8.3.2p4: There shall be no ... arrays of references ...
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_references)
+ << getPrintableNameForEntity(Entity);
+ return QualType();
+ }
+
+ if (const RecordType *EltTy = T->getAsRecordType()) {
+ // If the element type is a struct or union that contains a flexible
+ // array member, accept it as a GNU extension: C99 6.7.2.1p2.
+ if (EltTy->getDecl()->hasFlexibleArrayMember())
+ Diag(Loc, diag::ext_flexible_array_in_array) << T;
+ } else if (T->isObjCInterfaceType()) {
+ Diag(Loc, diag::err_objc_array_of_interfaces) << T;
+ return QualType();
+ }
+
+ // C99 6.7.5.2p1: The size expression shall have integer type.
+ if (ArraySize && !ArraySize->isTypeDependent() &&
+ !ArraySize->getType()->isIntegerType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ ArraySize->Destroy(Context);
+ return QualType();
+ }
+ llvm::APSInt ConstVal(32);
+ if (!ArraySize) {
+ if (ASM == ArrayType::Star)
+ T = Context.getVariableArrayType(T, 0, ASM, Quals);
+ else
+ T = Context.getIncompleteArrayType(T, ASM, Quals);
+ } else if (ArraySize->isValueDependent()) {
+ T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals);
+ } else if (!ArraySize->isIntegerConstantExpr(ConstVal, Context) ||
+ (!T->isDependentType() && !T->isConstantSizeType())) {
+ // Per C99, a variable length array is an array with either a non-constant
+ // size or an element type that itself has a non-constant size.
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals);
+ } else {
+ // C99 6.7.5.2p1: If the expression is a constant expression, it shall
+ // have a value greater than zero.
+ if (ConstVal.isSigned()) {
+ if (ConstVal.isNegative()) {
+ Diag(ArraySize->getLocStart(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ } else if (ConstVal == 0) {
+ // GCC accepts zero sized static arrays.
+ Diag(ArraySize->getLocStart(), diag::ext_typecheck_zero_array_size)
+ << ArraySize->getSourceRange();
+ }
+ }
+ T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
+ }
+ // If this is not C99, extwarn about VLAs and C99 array size modifiers.
+ if (!getLangOptions().C99) {
+ if (ArraySize && !ArraySize->isTypeDependent() &&
+ !ArraySize->isValueDependent() &&
+ !ArraySize->isIntegerConstantExpr(Context))
+ Diag(Loc, diag::ext_vla);
+ else if (ASM != ArrayType::Normal || Quals != 0)
+ Diag(Loc, diag::ext_c99_array_usage);
+ }
+
+ return T;
+}
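+
+// Illustrative examples (hypothetical declarations, for exposition only):
+//
+//   int a[10];    // constant array type
+//   int b[n];     // variable length array (ext_vla warning outside C99)
+//   int c[-1];    // error: err_typecheck_negative_array_size
+//   int d[0];     // accepted as a GNU extension (ext_typecheck_zero_array_size)
+//   int &e[4];    // error: err_illegal_decl_array_of_references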
+
+/// \brief Build a function type.
+///
+/// This routine checks the function type according to C++ rules and
+/// under the assumption that the result type and parameter types have
+/// just been instantiated from a template. It therefore duplicates
+/// some of the behavior of GetTypeForDeclarator, but in a much
+/// simpler form that is only suitable for this narrow use case.
+///
+/// \param T The return type of the function.
+///
+/// \param ParamTypes The parameter types of the function. This array
+/// will be modified to account for adjustments to the types of the
+/// function parameters.
+///
+/// \param NumParamTypes The number of parameter types in ParamTypes.
+///
+/// \param Variadic Whether this is a variadic function type.
+///
+/// \param Quals The cvr-qualifiers to be applied to the function type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// function type or, if there is no such entity, the location of the
+/// type that will have function type.
+///
+/// \param Entity The name of the entity that involves the function
+/// type, if known.
+///
+/// \returns A suitable function type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildFunctionType(QualType T,
+ QualType *ParamTypes,
+ unsigned NumParamTypes,
+ bool Variadic, unsigned Quals,
+ SourceLocation Loc, DeclarationName Entity) {
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(Loc, diag::err_func_returning_array_function) << T;
+ return QualType();
+ }
+
+ bool Invalid = false;
+ for (unsigned Idx = 0; Idx < NumParamTypes; ++Idx) {
+ QualType ParamType = adjustParameterType(ParamTypes[Idx]);
+ if (ParamType->isVoidType()) {
+ Diag(Loc, diag::err_param_with_void_type);
+ Invalid = true;
+ }
+
+ ParamTypes[Idx] = ParamType;
+ }
+
+ if (Invalid)
+ return QualType();
+
+ return Context.getFunctionType(T, ParamTypes, NumParamTypes, Variadic,
+ Quals);
+}
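+
+// Illustrative example (hypothetical instantiation, for exposition only):
+// instantiating a signature such as "T f(U)" with T = int[3] fails with
+// err_func_returning_array_function, while U = int[8] is silently adjusted
+// to the parameter type "int *" and U = void is rejected with
+// err_param_with_void_type.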
+
+/// GetTypeForDeclarator - Convert the type for the specified
+/// declarator to Type instances. Skip the outermost Skip type
+/// objects.
+///
+/// If OwnedDecl is non-NULL, and this declarator's decl-specifier-seq
+/// owns the declaration of a type (e.g., the definition of a struct
+/// type), then *OwnedDecl will receive the owned declaration.
+QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, unsigned Skip,
+ TagDecl **OwnedDecl) {
+ bool OmittedReturnType = false;
+
+ if (D.getContext() == Declarator::BlockLiteralContext
+ && Skip == 0
+ && !D.getDeclSpec().hasTypeSpecifier()
+ && (D.getNumTypeObjects() == 0
+ || (D.getNumTypeObjects() == 1
+ && D.getTypeObject(0).Kind == DeclaratorChunk::Function)))
+ OmittedReturnType = true;
+
+ // long long is a C99 feature.
+ if (!getLangOptions().C99 && !getLangOptions().CPlusPlus0x &&
+ D.getDeclSpec().getTypeSpecWidth() == DeclSpec::TSW_longlong)
+ Diag(D.getDeclSpec().getTypeSpecWidthLoc(), diag::ext_longlong);
+
+ // Determine the type of the declarator. Not all forms of declarator
+ // have a type.
+ QualType T;
+ switch (D.getKind()) {
+ case Declarator::DK_Abstract:
+ case Declarator::DK_Normal:
+ case Declarator::DK_Operator: {
+ const DeclSpec &DS = D.getDeclSpec();
+ if (OmittedReturnType) {
+ // We default to a dependent type initially. Can be modified by
+ // the first return statement.
+ T = Context.DependentTy;
+ } else {
+ bool isInvalid = false;
+ T = ConvertDeclSpecToType(DS, D.getIdentifierLoc(), isInvalid);
+ if (isInvalid)
+ D.setInvalidType(true);
+ else if (OwnedDecl && DS.isTypeSpecOwned())
+ *OwnedDecl = cast<TagDecl>((Decl *)DS.getTypeRep());
+ }
+ break;
+ }
+
+ case Declarator::DK_Constructor:
+ case Declarator::DK_Destructor:
+ case Declarator::DK_Conversion:
+ // Constructors and destructors don't have return types. Use
+ // "void" instead. Conversion operators will check their return
+ // types separately.
+ T = Context.VoidTy;
+ break;
+ }
+
+ // The name we're declaring, if any.
+ DeclarationName Name;
+ if (D.getIdentifier())
+ Name = D.getIdentifier();
+
+ // Walk the DeclTypeInfo, building the recursive type as we go.
+ // DeclTypeInfos are ordered from the identifier out, which is
+ // opposite of what we want :).
+ for (unsigned i = Skip, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &DeclType = D.getTypeObject(e-i-1+Skip);
+ switch (DeclType.Kind) {
+ default: assert(0 && "Unknown decltype!");
+ case DeclaratorChunk::BlockPointer:
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ Diag(DeclType.Loc, diag::err_blocks_disable);
+
+ if (!T.getTypePtr()->isFunctionType())
+ Diag(D.getIdentifierLoc(), diag::err_nonfunction_block_type);
+ else
+ T = (Context.getBlockPointerType(T)
+ .getQualifiedType(DeclType.Cls.TypeQuals));
+ break;
+ case DeclaratorChunk::Pointer:
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
+ Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ T = BuildPointerType(T, DeclType.Ptr.TypeQuals, DeclType.Loc, Name);
+ break;
+ case DeclaratorChunk::Reference:
+ // Verify that we're not building a reference to pointer to function with
+ // exception specification.
+ if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
+ Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ T = BuildReferenceType(T, DeclType.Ref.LValueRef,
+ DeclType.Ref.HasRestrict ? QualType::Restrict : 0,
+ DeclType.Loc, Name);
+ break;
+ case DeclaratorChunk::Array: {
+ // Verify that we're not building an array of pointers to function with
+ // exception specification.
+ if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
+ Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
+ Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
+ ArrayType::ArraySizeModifier ASM;
+ if (ATI.isStar)
+ ASM = ArrayType::Star;
+ else if (ATI.hasStatic)
+ ASM = ArrayType::Static;
+ else
+ ASM = ArrayType::Normal;
+ if (ASM == ArrayType::Star &&
+ D.getContext() != Declarator::PrototypeContext) {
+ // FIXME: This check isn't quite right: it allows star in prototypes
+ // for function definitions, and disallows some edge cases detailed
+ // in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
+ Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
+ ASM = ArrayType::Normal;
+ D.setInvalidType(true);
+ }
+ T = BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals, DeclType.Loc, Name);
+ break;
+ }
+ case DeclaratorChunk::Function: {
+ // If the function declarator has a prototype (i.e. it is not () and
+ // does not have a K&R-style identifier list), then the arguments are part
+ // of the type, otherwise the argument list is ().
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+
+ // C99 6.7.5.3p1: The return type may not be a function or array type.
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(DeclType.Loc, diag::err_func_returning_array_function) << T;
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ }
+
+ if (getLangOptions().CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ TagDecl *Tag = cast<TagDecl>((Decl *)D.getDeclSpec().getTypeRep());
+ if (Tag->isDefinition())
+ Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
+ << Context.getTypeDeclType(Tag);
+ }
+
+ // Exception specs are not allowed in typedefs. Complain, but add it
+ // anyway.
+ if (FTI.hasExceptionSpec &&
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ Diag(FTI.getThrowLoc(), diag::err_exception_spec_in_typedef);
+
+ if (FTI.NumArgs == 0) {
+ if (getLangOptions().CPlusPlus) {
+ // C++ 8.3.5p2: If the parameter-declaration-clause is empty, the
+ // function takes no arguments.
+ llvm::SmallVector<QualType, 4> Exceptions;
+ Exceptions.reserve(FTI.NumExceptions);
+ for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
+ QualType ET = QualType::getFromOpaquePtr(FTI.Exceptions[ei].Ty);
+ // Check that the type is valid for an exception spec, and drop it
+ // if not.
+ if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
+ Exceptions.push_back(ET);
+ }
+ T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, FTI.TypeQuals,
+ FTI.hasExceptionSpec,
+ FTI.hasAnyExceptionSpec,
+ Exceptions.size(), Exceptions.data());
+ } else if (FTI.isVariadic) {
+ // We allow a zero-parameter variadic function in C if the
+ // function is marked with the "overloadable"
+ // attribute. Scan for this attribute now.
+ bool Overloadable = false;
+ for (const AttributeList *Attrs = D.getAttributes();
+ Attrs; Attrs = Attrs->getNext()) {
+ if (Attrs->getKind() == AttributeList::AT_overloadable) {
+ Overloadable = true;
+ break;
+ }
+ }
+
+ if (!Overloadable)
+ Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
+ T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, 0);
+ } else {
+ // Simple void foo(), where the incoming T is the result type.
+ T = Context.getFunctionNoProtoType(T);
+ }
+ } else if (FTI.ArgInfo[0].Param == 0) {
+ // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function definition.
+ Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
+ } else {
+ // Otherwise, we have a function with an argument list that is
+ // potentially variadic.
+ llvm::SmallVector<QualType, 16> ArgTys;
+
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
+ ParmVarDecl *Param =
+ cast<ParmVarDecl>(FTI.ArgInfo[i].Param.getAs<Decl>());
+ QualType ArgTy = Param->getType();
+ assert(!ArgTy.isNull() && "Couldn't parse type?");
+
+ // Adjust the parameter type.
+ assert((ArgTy == adjustParameterType(ArgTy)) && "Unadjusted type?");
+
+ // Look for 'void'. void is allowed only as a single argument to a
+ // function with no other parameters (C99 6.7.5.3p10). We record
+ // int(void) as a FunctionProtoType with an empty argument list.
+ if (ArgTy->isVoidType()) {
+ // If this is something like 'float(int, void)', reject it. 'void'
+ // is an incomplete type (C99 6.2.5p19) and function decls cannot
+ // have arguments of incomplete type.
+ if (FTI.NumArgs != 1 || FTI.isVariadic) {
+ Diag(DeclType.Loc, diag::err_void_only_param);
+ ArgTy = Context.IntTy;
+ Param->setType(ArgTy);
+ } else if (FTI.ArgInfo[i].Ident) {
+ // Reject, but continue to parse 'int(void abc)'.
+ Diag(FTI.ArgInfo[i].IdentLoc,
+ diag::err_param_with_void_type);
+ ArgTy = Context.IntTy;
+ Param->setType(ArgTy);
+ } else {
+ // Reject, but continue to parse 'float(const void)'.
+ if (ArgTy.getCVRQualifiers())
+ Diag(DeclType.Loc, diag::err_void_param_qualified);
+
+ // Do not add 'void' to the ArgTys list.
+ break;
+ }
+ } else if (!FTI.hasPrototype) {
+ if (ArgTy->isPromotableIntegerType()) {
+ ArgTy = Context.IntTy;
+ } else if (const BuiltinType* BTy = ArgTy->getAsBuiltinType()) {
+ if (BTy->getKind() == BuiltinType::Float)
+ ArgTy = Context.DoubleTy;
+ }
+ }
+
+ ArgTys.push_back(ArgTy);
+ }
+
+ llvm::SmallVector<QualType, 4> Exceptions;
+ Exceptions.reserve(FTI.NumExceptions);
+ for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
+ QualType ET = QualType::getFromOpaquePtr(FTI.Exceptions[ei].Ty);
+ // Check that the type is valid for an exception spec, and drop it if
+ // not.
+ if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
+ Exceptions.push_back(ET);
+ }
+
+ T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(),
+ FTI.isVariadic, FTI.TypeQuals,
+ FTI.hasExceptionSpec,
+ FTI.hasAnyExceptionSpec,
+ Exceptions.size(), Exceptions.data());
+ }
+ break;
+ }
+ case DeclaratorChunk::MemberPointer:
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) {
+ Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ // The scope spec must refer to a class, or be dependent.
+ DeclContext *DC = computeDeclContext(DeclType.Mem.Scope());
+ QualType ClsType;
+ // FIXME: Extend for dependent types when it's actually supported.
+ // See ActOnCXXNestedNameSpecifier.
+ if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(DC)) {
+ ClsType = Context.getTagDeclType(RD);
+ } else {
+ if (DC) {
+ Diag(DeclType.Mem.Scope().getBeginLoc(),
+ diag::err_illegal_decl_mempointer_in_nonclass)
+ << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
+ << DeclType.Mem.Scope().getRange();
+ }
+ D.setInvalidType(true);
+ ClsType = Context.IntTy;
+ }
+
+ // C++ 8.3.3p3: A pointer to member shall not point to ... a member
+ // with reference type, or "cv void."
+ if (T->isReferenceType()) {
+ Diag(DeclType.Loc, diag::err_illegal_decl_pointer_to_reference)
+ << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name");
+ D.setInvalidType(true);
+ T = Context.IntTy;
+ }
+ if (T->isVoidType()) {
+ Diag(DeclType.Loc, diag::err_illegal_decl_mempointer_to_void)
+ << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name");
+ T = Context.IntTy;
+ }
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if ((DeclType.Mem.TypeQuals & QualType::Restrict) &&
+ !T->isIncompleteOrObjectType()) {
+ Diag(DeclType.Loc, diag::err_typecheck_invalid_restrict_invalid_pointee)
+ << T;
+ DeclType.Mem.TypeQuals &= ~QualType::Restrict;
+ }
+
+ T = Context.getMemberPointerType(T, ClsType.getTypePtr()).
+ getQualifiedType(DeclType.Mem.TypeQuals);
+
+ break;
+ }
+
+ if (T.isNull()) {
+ D.setInvalidType(true);
+ T = Context.IntTy;
+ }
+
+ // See if there are any attributes on this declarator chunk.
+ if (const AttributeList *AL = DeclType.getAttrs())
+ ProcessTypeAttributeList(T, AL);
+ }
+
+ if (getLangOptions().CPlusPlus && T->isFunctionType()) {
+ const FunctionProtoType *FnTy = T->getAsFunctionProtoType();
+ assert(FnTy && "Why oh why is there not a FunctionProtoType here ?");
+
+ // C++ 8.3.5p4: A cv-qualifier-seq shall only be part of the function type
+ // for a nonstatic member function, the function type to which a pointer
+ // to member refers, or the top-level function type of a function typedef
+ // declaration.
+ if (FnTy->getTypeQuals() != 0 &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ ((D.getContext() != Declarator::MemberContext &&
+ (!D.getCXXScopeSpec().isSet() ||
+ !computeDeclContext(D.getCXXScopeSpec())->isRecord())) ||
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) {
+ if (D.isFunctionDeclarator())
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_function_type);
+ else
+ Diag(D.getIdentifierLoc(),
+ diag::err_invalid_qualified_typedef_function_type_use);
+
+ // Strip the cv-quals from the type.
+ T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(),
+ FnTy->getNumArgs(), FnTy->isVariadic(), 0);
+ }
+ }
+
+ // If there were any type attributes applied to the decl itself (not the
+ // type), apply them to the type rather than the decl.
+ if (const AttributeList *Attrs = D.getAttributes())
+ ProcessTypeAttributeList(T, Attrs);
+
+ return T;
+}
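+
+// Illustrative example (hypothetical declarator, for exposition only): for
+//
+//   int *const (*fp)(char);
+//
+// the base type is "int" and the chunks are applied starting with the one
+// farthest from the identifier: first the outer "*const" (forming the return
+// type "int *const"), then the function chunk "(char)", and finally the "*"
+// that binds directly to "fp", so fp ends up with type "int *const (*)(char)".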
+
+/// CheckSpecifiedExceptionType - Check if the given type is valid in an
+/// exception specification. Incomplete types, and pointers or references to
+/// incomplete types other than (cv) void*, are not allowed.
+bool Sema::CheckSpecifiedExceptionType(QualType T, const SourceRange &Range) {
+ // FIXME: This may not correctly work with the fix for core issue 437,
+ // where a class's own type is considered complete within its body.
+
+ // C++ 15.4p2: A type denoted in an exception-specification shall not denote
+ // an incomplete type.
+ if (T->isIncompleteType())
+ return Diag(Range.getBegin(), diag::err_incomplete_in_exception_spec)
+ << Range << T << /*direct*/0;
+
+ // C++ 15.4p2: A type denoted in an exception-specification shall not denote
+ // an incomplete type, or a pointer or reference to an incomplete type, other
+ // than (cv) void*.
+ int kind;
+ if (const PointerType* IT = T->getAsPointerType()) {
+ T = IT->getPointeeType();
+ kind = 1;
+ } else if (const ReferenceType* IT = T->getAsReferenceType()) {
+ T = IT->getPointeeType();
+ kind = 2;
+ } else
+ return false;
+
+ if (T->isIncompleteType() && !T->isVoidType())
+ return Diag(Range.getBegin(), diag::err_incomplete_in_exception_spec)
+ << Range << T << /*indirect*/kind;
+
+ return false;
+}
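+
+// Illustrative example (hypothetical code, for exposition only): given
+//
+//   struct S;                     // incomplete
+//   void f() throw(S);            // error: incomplete type in exception spec
+//   void g() throw(S*);           // error: pointer to incomplete type
+//   void h() throw(void*, int&);  // fine: (cv) void* and a complete referent
+//
+// the callers above drop the offending types from the exception list and
+// keep the rest.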
+
+/// CheckDistantExceptionSpec - Check if the given type is a pointer or
+/// pointer-to-member to a function with an exception specification. This means that
+/// it is invalid to add another level of indirection.
+bool Sema::CheckDistantExceptionSpec(QualType T) {
+ if (const PointerType *PT = T->getAsPointerType())
+ T = PT->getPointeeType();
+ else if (const MemberPointerType *PT = T->getAsMemberPointerType())
+ T = PT->getPointeeType();
+ else
+ return false;
+
+ const FunctionProtoType *FnT = T->getAsFunctionProtoType();
+ if (!FnT)
+ return false;
+
+ return FnT->hasExceptionSpec();
+}
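+
+// Illustrative example (hypothetical code, for exposition only): for
+//
+//   void (**pp)() throw(int);   // error: err_distant_exception_spec
+//
+// the inner pointer to "void () throw(int)" is fine, but the outer pointer
+// chunk calls CheckDistantExceptionSpec(), which sees a pointer to a function
+// with an exception specification and rejects the extra indirection.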
+
+/// ObjCGetTypeForMethodDefinition - Builds the type for a method definition
+/// declarator.
+QualType Sema::ObjCGetTypeForMethodDefinition(DeclPtrTy D) {
+ ObjCMethodDecl *MDecl = cast<ObjCMethodDecl>(D.getAs<Decl>());
+ QualType T = MDecl->getResultType();
+ llvm::SmallVector<QualType, 16> ArgTys;
+
+ // Add the first two invisible argument types for self and _cmd.
+ if (MDecl->isInstanceMethod()) {
+ QualType selfTy = Context.getObjCInterfaceType(MDecl->getClassInterface());
+ selfTy = Context.getPointerType(selfTy);
+ ArgTys.push_back(selfTy);
+ } else
+ ArgTys.push_back(Context.getObjCIdType());
+ ArgTys.push_back(Context.getObjCSelType());
+
+ for (ObjCMethodDecl::param_iterator PI = MDecl->param_begin(),
+ E = MDecl->param_end(); PI != E; ++PI) {
+ QualType ArgTy = (*PI)->getType();
+ assert(!ArgTy.isNull() && "Couldn't parse type?");
+ ArgTy = adjustParameterType(ArgTy);
+ ArgTys.push_back(ArgTy);
+ }
+ T = Context.getFunctionType(T, &ArgTys[0], ArgTys.size(),
+ MDecl->isVariadic(), 0);
+ return T;
+}
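+
+// Illustrative example (hypothetical Objective-C method, for exposition
+// only): for an instance method of class Foo declared as
+//
+//   - (int)countOf:(double)x;
+//
+// the built definition type is "int (Foo *, SEL, double)": the implicit
+// "self" parameter is a pointer to the interface type, "_cmd" has the
+// selector type, and the declared parameters follow with the usual
+// array/function parameter adjustment applied.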
+
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and return true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
+bool Sema::UnwrapSimilarPointerTypes(QualType& T1, QualType& T2) {
+ const PointerType *T1PtrType = T1->getAsPointerType(),
+ *T2PtrType = T2->getAsPointerType();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAsMemberPointerType(),
+ *T2MPType = T2->getAsMemberPointerType();
+ if (T1MPType && T2MPType &&
+ Context.getCanonicalType(T1MPType->getClass()) ==
+ Context.getCanonicalType(T2MPType->getClass())) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+ return false;
+}
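+
+// Illustrative example (hypothetical types, for exposition only): comparing
+// "int **" with "const int *const *" unwraps one pointer level per call,
+// first yielding ("int *", "const int *const") and then ("int", "const int"),
+// at which point the caller can reason about cv-qualifiers level by level as
+// in the C++ qualification conversion (C++ 4.4).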
+
+Sema::TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
+ // C99 6.7.6: Type names have no identifier. This is already validated by
+ // the parser.
+ assert(D.getIdentifier() == 0 && "Type name should have no identifier!");
+
+ TagDecl *OwnedTag = 0;
+ QualType T = GetTypeForDeclarator(D, S, /*Skip=*/0, &OwnedTag);
+ if (D.isInvalidType())
+ return true;
+
+ if (getLangOptions().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+
+ // C++0x [dcl.type]p3:
+ // A type-specifier-seq shall not define a class or enumeration
+ // unless it appears in the type-id of an alias-declaration
+ // (7.1.3).
+ if (OwnedTag && OwnedTag->isDefinition())
+ Diag(OwnedTag->getLocation(), diag::err_type_defined_in_type_specifier)
+ << Context.getTypeDeclType(OwnedTag);
+ }
+
+ return T.getAsOpaquePtr();
+}
+
+//===----------------------------------------------------------------------===//
+// Type Attribute Processing
+//===----------------------------------------------------------------------===//
+
+/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
+/// specified type. The attribute contains 1 argument, the id of the address
+/// space for the type.
+static void HandleAddressSpaceTypeAttribute(QualType &Type,
+ const AttributeList &Attr, Sema &S){
+ // If this type is already address space qualified, reject it.
+ // Clause 6.7.3 - Type qualifiers: "No type shall be qualified by qualifiers
+ // for two or more different address spaces."
+ if (Type.getAddressSpace()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ Expr *ASArgExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt addrSpace(32);
+ if (!ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_not_int)
+ << ASArgExpr->getSourceRange();
+ return;
+ }
+
+ unsigned ASIdx = static_cast<unsigned>(addrSpace.getZExtValue());
+ Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
+}
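+
+// Illustrative example (hypothetical declaration, for exposition only):
+//
+//   __attribute__((address_space(257))) int *p;
+//
+// the attribute's single integer-constant argument (257) becomes the address
+// space index of the pointee, so p is a pointer to an int in address space 257.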
+
+/// HandleObjCGCTypeAttribute - Process an Objective-C objc_gc attribute on the
+/// specified type. The attribute takes one argument, either weak or strong.
+static void HandleObjCGCTypeAttribute(QualType &Type,
+ const AttributeList &Attr, Sema &S) {
+ if (Type.getObjCGCAttr() != QualType::GCNone) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_multiple_objc_gc);
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "objc_gc" << 1;
+ return;
+ }
+ QualType::GCAttrTypes GCAttr;
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ if (Attr.getParameterName()->isStr("weak"))
+ GCAttr = QualType::Weak;
+ else if (Attr.getParameterName()->isStr("strong"))
+ GCAttr = QualType::Strong;
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "objc_gc" << Attr.getParameterName();
+ return;
+ }
+
+ Type = S.Context.getObjCGCQualType(Type, GCAttr);
+}
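+
+// Illustrative example (hypothetical declaration, for exposition only):
+//
+//   __attribute__((objc_gc(weak))) id cachedObject;
+//
+// the single "weak" parameter maps to QualType::Weak, so the declared type
+// carries the weak GC qualifier; any parameter other than "weak" or "strong"
+// is warned about and ignored.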
+
+void Sema::ProcessTypeAttributeList(QualType &Result, const AttributeList *AL) {
+ // Scan through and apply attributes to this type where it makes sense. Some
+ // attributes (such as __address_space__, __vector_size__, etc) apply to the
+ // type, but others can be present in the type specifiers even though they
+ // apply to the decl. Here we apply type attributes and ignore the rest.
+ for (; AL; AL = AL->getNext()) {
+ // If this is a type attribute we can handle, do so now; otherwise, leave
+ // it alone so it can be processed as a declaration attribute elsewhere.
+ switch (AL->getKind()) {
+ default: break;
+ case AttributeList::AT_address_space:
+ HandleAddressSpaceTypeAttribute(Result, *AL, *this);
+ break;
+ case AttributeList::AT_objc_gc:
+ HandleObjCGCTypeAttribute(Result, *AL, *this);
+ break;
+ }
+ }
+}
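For instance, in a hypothetical declaration carrying both kinds of attributes, only the type attribute is consumed by this routine; the other is left for declaration-attribute processing.

    /* address_space is applied to the type here; aligned is not a type
     * attribute and is handled later as a declaration attribute. */
    __attribute__((address_space(2), aligned(16))) int MappedRegister;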
+
+/// @brief Ensure that the type T is a complete type.
+///
+/// This routine checks whether the type @p T is complete in any
+/// context where a complete type is required. If @p T is a complete
+/// type, returns false. If @p T is a class template specialization,
+/// this routine then attempts to perform class template
+/// instantiation. If instantiation fails, or if @p T is incomplete
+/// and cannot be completed, issues the diagnostic @p diag (giving it
+/// the type @p T) and returns true.
+///
+/// @param Loc The location in the source that the incomplete type
+/// diagnostic should refer to.
+///
+/// @param T The type that this routine is examining for completeness.
+///
+/// @param diag The diagnostic value (e.g.,
+/// @c diag::err_typecheck_decl_incomplete_type) that will be used
+/// for the error message if @p T is incomplete.
+///
+/// @param Range1 An optional range in the source code that will be a
+/// part of the "incomplete type" error message.
+///
+/// @param Range2 An optional range in the source code that will be a
+/// part of the "incomplete type" error message.
+///
+/// @param PrintType If non-NULL, the type that should be printed
+/// instead of @p T. This parameter should be used when the type that
+/// we're checking for incompleteness isn't the type that should be
+/// displayed to the user, e.g., when T is a type and PrintType is a
+/// pointer to T.
+///
+/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
+/// @c false otherwise.
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, unsigned diag,
+ SourceRange Range1, SourceRange Range2,
+ QualType PrintType) {
+ // FIXME: Add this assertion to help us flush out problems with
+ // checking for dependent types and type-dependent expressions.
+ //
+ // assert(!T->isDependentType() &&
+ // "Can't ask whether a dependent type is complete");
+
+ // If we have a complete type, we're done.
+ if (!T->isIncompleteType())
+ return false;
+
+ // If we have a class template specialization or a class member of a
+ // class template specialization, try to instantiate it.
+ if (const RecordType *Record = T->getAsRecordType()) {
+ if (ClassTemplateSpecializationDecl *ClassTemplateSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) {
+ // Update the class template specialization's location to
+ // refer to the point of instantiation.
+ if (Loc.isValid())
+ ClassTemplateSpec->setLocation(Loc);
+ return InstantiateClassTemplateSpecialization(ClassTemplateSpec,
+ /*ExplicitInstantiation=*/false);
+ }
+ } else if (CXXRecordDecl *Rec
+ = dyn_cast<CXXRecordDecl>(Record->getDecl())) {
+ if (CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass()) {
+ // Find the class template specialization that surrounds this
+ // member class.
+ ClassTemplateSpecializationDecl *Spec = 0;
+ for (DeclContext *Parent = Rec->getDeclContext();
+ Parent && !Spec; Parent = Parent->getParent())
+ Spec = dyn_cast<ClassTemplateSpecializationDecl>(Parent);
+ assert(Spec && "Not a member of a class template specialization?");
+ return InstantiateClass(Loc, Rec, Pattern, Spec->getTemplateArgs(),
+ /*ExplicitInstantiation=*/false);
+ }
+ }
+ }
+
+ if (PrintType.isNull())
+ PrintType = T;
+
+ // We have an incomplete type. Produce a diagnostic.
+ Diag(Loc, diag) << PrintType << Range1 << Range2;
+
+ // If the type was a forward declaration of a class/struct/union
+ // type, produce a note pointing at the prior declaration (or at the
+ // definition currently in progress).
+ const TagType *Tag = 0;
+ if (const RecordType *Record = T->getAsRecordType())
+ Tag = Record;
+ else if (const EnumType *Enum = T->getAsEnumType())
+ Tag = Enum;
+
+ if (Tag && !Tag->getDecl()->isInvalidDecl())
+ Diag(Tag->getDecl()->getLocation(),
+ Tag->isBeingDefined() ? diag::note_type_being_defined
+ : diag::note_forward_declaration)
+ << QualType(Tag, 0);
+
+ return true;
+}
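A hedged sketch of how a caller elsewhere in Sema might use this entry point, roughly matching the parameters documented above; Var stands for a hypothetical VarDecl*, the chosen diagnostic ID is only illustrative, and the call assumes the declaration in the header supplies defaults for the trailing Range2 and PrintType parameters.

    if (RequireCompleteType(Var->getLocation(), Var->getType(),
                            diag::err_typecheck_decl_incomplete_type,
                            Var->getSourceRange()))
      Var->setInvalidDecl();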
+
+/// \brief Retrieve a version of the type 'T' that is qualified by the
+/// nested-name-specifier contained in SS.
+QualType Sema::getQualifiedNameType(const CXXScopeSpec &SS, QualType T) {
+ if (!SS.isSet() || SS.isInvalid() || T.isNull())
+ return T;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ return Context.getQualifiedNameType(NNS, T);
+}